filename
stringlengths
3
9
code
stringlengths
6
1.09M
225223.c
/* inftrees.c -- generate Huffman trees for efficient decoding
 * Copyright (C) 1995-2002 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* $Id: inftrees.c 14574 2005-10-29 16:27:43Z bonefish $ */

#include "zutil.h"
#include "inftrees.h"

#if !defined(BUILDFIXED) && !defined(STDC)
# define BUILDFIXED /* non ANSI compilers may not accept inffixed.h */
#endif

const char inflate_copyright[] =
   " inflate 1.1.4 Copyright 1995-2002 Mark Adler ";
/*
  If you use the zlib library in a product, an acknowledgment is welcome
  in the documentation of your product. If for some reason you cannot
  include such an acknowledgment, I would appreciate that you keep this
  copyright string in the executable of your product.
 */

/* PDFlib GmbH: conflicts with Visual Studio.NET
struct internal_state {int dummy;}; *//* for buggy compilers */

/* simplify the use of the inflate_huft type with some defines */
#define exop word.what.Exop
#define bits word.what.Bits

local int huft_build OF((
    uIntf *,            /* code lengths in bits */
    uInt,               /* number of codes */
    uInt,               /* number of "simple" codes */
    const uIntf *,      /* list of base values for non-simple codes */
    const uIntf *,      /* list of extra bits for non-simple codes */
    inflate_huft * FAR*,/* result: starting table */
    uIntf *,            /* maximum lookup bits (returns actual) */
    inflate_huft *,     /* space for trees */
    uInt *,             /* hufts used in space */
    uIntf * ));         /* space for values */

/* Tables for deflate from PKZIP's appnote.txt. */
local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
        /* see note #13 above about 258 */
local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
        8193, 12289, 16385, 24577};
local const uInt cpdext[30] = { /* Extra bits for distance codes */
        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
        7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};

/*
   Huffman code decoding is performed using a multi-level table lookup.
   The fastest way to decode is to simply build a lookup table whose
   size is determined by the longest code.  However, the time it takes
   to build this table can also be a factor if the data being decoded
   is not very long.  The most common codes are necessarily the
   shortest codes, so those codes dominate the decoding time, and hence
   the speed.  The idea is you can have a shorter table that decodes the
   shorter, more probable codes, and then point to subsidiary tables for
   the longer codes.  The time it costs to decode the longer codes is
   then traded against the time it takes to make longer tables.

   This results of this trade are in the variables lbits and dbits
   below.  lbits is the number of bits the first level table for literal/
   length codes can decode in one step, and dbits is the same thing for
   the distance codes.  Subsequent tables are also less than or equal to
   those sizes.  These values may be adjusted either when all of the
   codes are shorter than that, in which case the longest code length in
   bits is used, or when the shortest code is *longer* than the requested
   table size, in which case the length of the shortest code in bits is
   used.

   There are two different values for the two tables, since they code a
   different number of possibilities each.  The literal/length table
   codes 286 possible values, or in a flat code, a little over eight
   bits.  The distance table codes 30 possible values, or a little less
   than five bits, flat.  The optimum values for speed end up being
   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
   The optimum values may differ though from machine to machine, and
   possibly even between compilers.  Your mileage may vary.
 */

/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
#define BMAX 15         /* maximum bit length of any code */

local int huft_build(
uIntf *b,               /* code lengths in bits (all assumed <= BMAX) */
uInt n,                 /* number of codes (assumed <= 288) */
uInt s,                 /* number of simple-valued codes (0..s-1) */
const uIntf *d,         /* list of base values for non-simple codes */
const uIntf *e,         /* list of extra bits for non-simple codes */
inflate_huft * FAR *t,  /* result: starting table */
uIntf *m,               /* maximum lookup bits, returns actual */
inflate_huft *hp,       /* space for trees */
uInt *hn,               /* hufts used in space */
uIntf *v)               /* working area: values in order of bit length */
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return Z_OK on success, Z_BUF_ERROR
   if the given code set is incomplete (the tables are still built in this
   case), or Z_DATA_ERROR if the input is invalid. */
{
  uInt a;                       /* counter for codes of length k */
  uInt c[BMAX+1];               /* bit length count table */
  uInt f;                       /* i repeats in table every f entries */
  int g;                        /* maximum code length */
  int h;                        /* table level */
  register uInt i;              /* counter, current code */
  register uInt j;              /* counter */
  register int k;               /* number of bits in current code */
  int l;                        /* bits per table (returned in m) */
  uInt mask;                    /* (1 << w) - 1, to avoid cc -O bug on HP */
  register uIntf *p;            /* pointer into c[], b[], or v[] */
  inflate_huft *q;              /* points to current table */
  struct inflate_huft_s r;      /* table entry for structure assignment */
  inflate_huft *u[BMAX];        /* table stack */
  register int w;               /* bits before this table == (l * h) */
  uInt x[BMAX+1];               /* bit offsets, then code stack */
  uIntf *xp;                    /* pointer into x */
  int y;                        /* number of dummy codes added */
  uInt z;                       /* number of entries in current table */

  /* Generate counts for each bit length */
  p = c;
#define C0 *p++ = 0;
#define C2 C0 C0 C0 C0
#define C4 C2 C2 C2 C2
  C4                            /* clear c[]--assume BMAX+1 is 16 */
  p = b;  i = n;
  do {
    c[*p++]++;                  /* assume all entries <= BMAX */
  } while (--i);
  if (c[0] == n)                /* null input--all zero length codes */
  {
    *t = (inflate_huft *)Z_NULL;
    *m = 0;
    return Z_OK;
  }

  /* Find minimum and maximum length, bound *m by those */
  l = *m;
  for (j = 1; j <= BMAX; j++)
    if (c[j])
      break;
  k = j;                        /* minimum code length */
  if ((uInt)l < j)
    l = j;
  for (i = BMAX; i; i--)
    if (c[i])
      break;
  g = i;                        /* maximum code length */
  if ((uInt)l > i)
    l = i;
  *m = l;

  /* Adjust last length count to fill out codes, if needed.  A negative
     y at any length means the code is oversubscribed (invalid input). */
  for (y = 1 << j; j < i; j++, y <<= 1)
    if ((y -= c[j]) < 0)
      return Z_DATA_ERROR;
  if ((y -= c[i]) < 0)
    return Z_DATA_ERROR;
  c[i] += y;

  /* Generate starting offsets into the value table for each length */
  x[1] = j = 0;
  p = c + 1;  xp = x + 2;
  while (--i) {                 /* note that i == g from above */
    *xp++ = (j += *p++);
  }

  /* Make a table of values in order of bit lengths */
  p = b;  i = 0;
  do {
    if ((j = *p++) != 0)
      v[x[j]++] = i;
  } while (++i < n);
  n = x[g];                     /* set n to length of v */

  /* Generate the Huffman codes and for each, make the table entries */
  x[0] = i = 0;                 /* first Huffman code is zero */
  p = v;                        /* grab values in bit order */
  h = -1;                       /* no tables yet--level -1 */
  w = -l;                       /* bits decoded == (l * h) */
  u[0] = (inflate_huft *)Z_NULL;        /* just to keep compilers happy */
  q = (inflate_huft *)Z_NULL;   /* ditto */
  z = 0;                        /* ditto */

  /* go through the bit lengths (k already is bits in shortest code) */
  for (; k <= g; k++)
  {
    a = c[k];
    while (a--)
    {
      /* here i is the Huffman code of length k bits for value *p */
      /* make tables up to required level */
      while (k > w + l)
      {
        h++;
        w += l;                 /* previous table always l bits */

        /* compute minimum size table less than or equal to l bits */
        z = g - w;
        z = z > (uInt)l ? l : z;        /* table size upper limit */
        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
        {                       /* too few codes for k-w bit table */
          f -= a + 1;           /* deduct codes from patterns left */
          xp = c + k;
          if (j < z)
            while (++j < z)     /* try smaller tables up to z bits */
            {
              if ((f <<= 1) <= *++xp)
                break;          /* enough codes to use up j bits */
              f -= *xp;         /* else deduct codes from patterns */
            }
        }
        z = 1 << j;             /* table entries for j-bit table */

        /* allocate new table */
        if (*hn + z > MANY)     /* (note: doesn't matter for fixed) */
          return Z_DATA_ERROR;  /* overflow of MANY */
        u[h] = q = hp + *hn;
        *hn += z;

        /* connect to last table, if there is one */
        if (h)
        {
          x[h] = i;             /* save pattern for backing up */
          r.bits = (Byte)l;     /* bits to dump before this table */
          r.exop = (Byte)j;     /* bits in this table */
          j = i >> (w - l);
          r.base = (uInt)(q - u[h-1] - j);      /* offset to this table */
          u[h-1][j] = r;        /* connect to last table */
        }
        else
          *t = q;               /* first table is returned result */
      }

      /* set up table entry in r */
      r.bits = (Byte)(k - w);
      if (p >= v + n)
        r.exop = 128 + 64;      /* out of values--invalid code */
      else if (*p < s)
      {
        r.exop = (Byte)(*p < 256 ? 0 : 32 + 64);        /* 256 is end-of-block */
        r.base = *p++;          /* simple code is just the value */
      }
      else
      {
        r.exop = (Byte)(e[*p - s] + 16 + 64);   /* non-simple--look up in lists */
        r.base = d[*p++ - s];
      }

      /* fill code-like entries with r */
      f = 1 << (k - w);
      for (j = i >> w; j < z; j += f)
        q[j] = r;

      /* backwards increment the k-bit code i (codes are assigned in
         reversed-bit order, so increment from the high bit down) */
      for (j = 1 << (k - 1); i & j; j >>= 1)
        i ^= j;
      i ^= j;

      /* backup over finished tables */
      mask = (1 << w) - 1;      /* needed on HP, cc -O bug */
      while ((i & mask) != x[h])
      {
        h--;                    /* don't need to update q */
        w -= l;
        mask = (1 << w) - 1;
      }
    }
  }

  /* Return Z_BUF_ERROR if we were given an incomplete table */
  return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
}

/* Build the decoding table for the 19-entry code-lengths code of a
   dynamic block.  On Z_DATA_ERROR, z->msg describes the problem. */
int inflate_trees_bits(
uIntf *c,               /* 19 code lengths */
uIntf *bb,              /* bits tree desired/actual depth */
inflate_huft * FAR *tb, /* bits tree result */
inflate_huft *hp,       /* space for trees */
z_streamp z)            /* for messages */
{
  int r;
  uInt hn = 0;          /* hufts used in space */
  uIntf *v;             /* work area for huft_build */

  if ((v = (uIntf*)ZALLOC(z, 19, sizeof(uInt))) == Z_NULL)
    return Z_MEM_ERROR;
  r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL,
                 tb, bb, hp, &hn, v);
  if (r == Z_DATA_ERROR)
    z->msg = (char*)"oversubscribed dynamic bit lengths tree";
  else if (r == Z_BUF_ERROR || *bb == 0)
  {
    /* an incomplete code-lengths code is always an error */
    z->msg = (char*)"incomplete dynamic bit lengths tree";
    r = Z_DATA_ERROR;
  }
  ZFREE(z, v);
  return r;
}

/* Build the literal/length and distance decoding tables for a dynamic
   block from the nl + nd code lengths in c[].  On Z_DATA_ERROR, z->msg
   describes the problem. */
int inflate_trees_dynamic(
uInt nl,                /* number of literal/length codes */
uInt nd,                /* number of distance codes */
uIntf *c,               /* that many (total) code lengths */
uIntf *bl,              /* literal desired/actual bit depth */
uIntf *bd,              /* distance desired/actual bit depth */
inflate_huft * FAR *tl, /* literal/length tree result */
inflate_huft * FAR *td, /* distance tree result */
inflate_huft *hp,       /* space for trees */
z_streamp z)            /* for messages */
{
  int r;
  uInt hn = 0;          /* hufts used in space */
  uIntf *v;             /* work area for huft_build */

  /* allocate work area */
  if ((v = (uIntf*)ZALLOC(z, 288, sizeof(uInt))) == Z_NULL)
    return Z_MEM_ERROR;

  /* build literal/length tree */
  r = huft_build(c, nl, 257, cplens, cplext, tl, bl, hp, &hn, v);
  if (r != Z_OK || *bl == 0)
  {
    if (r == Z_DATA_ERROR)
      z->msg = (char*)"oversubscribed literal/length tree";
    else if (r != Z_MEM_ERROR)
    {
      z->msg = (char*)"incomplete literal/length tree";
      r = Z_DATA_ERROR;
    }
    ZFREE(z, v);
    return r;
  }

  /* build distance tree */
  r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, hp, &hn, v);
  if (r != Z_OK || (*bd == 0 && nl > 257))
  {
    if (r == Z_DATA_ERROR)
      z->msg = (char*)"oversubscribed distance tree";
    else if (r == Z_BUF_ERROR) {
#ifdef PKZIP_BUG_WORKAROUND
      /* some old PKZIP output has an incomplete distance tree; accept it */
      r = Z_OK;
    }
#else
      z->msg = (char*)"incomplete distance tree";
      r = Z_DATA_ERROR;
    }
    else if (r != Z_MEM_ERROR)
    {
      z->msg = (char*)"empty distance tree with lengths";
      r = Z_DATA_ERROR;
    }
    ZFREE(z, v);
    return r;
#endif
  }

  /* done */
  ZFREE(z, v);
  return Z_OK;
}

/* build fixed tables only once--keep them here */
#ifdef BUILDFIXED
local int fixed_built = 0;
#define FIXEDH 544      /* number of hufts used by fixed tables */
local inflate_huft fixed_mem[FIXEDH];
local uInt fixed_bl;
local uInt fixed_bd;
local inflate_huft *fixed_tl;
local inflate_huft *fixed_td;
#else
#include "inffixed.h"
#endif

/* Return the (statically cached) decoding tables for the fixed
   literal/length and distance codes defined by the deflate format.
   With BUILDFIXED the tables are built on first use. */
int inflate_trees_fixed(
uIntf *bl,               /* literal desired/actual bit depth */
uIntf *bd,               /* distance desired/actual bit depth */
inflate_huft * FAR *tl,  /* literal/length tree result */
inflate_huft * FAR *td,  /* distance tree result */
z_streamp z)             /* for memory allocation */
{
#ifdef BUILDFIXED
  /* build fixed tables if not already */
  if (!fixed_built)
  {
    int k;              /* temporary variable */
    uInt f = 0;         /* number of hufts used in fixed_mem */
    uIntf *c;           /* length list for huft_build */
    uIntf *v;           /* work area for huft_build */

    /* allocate memory */
    if ((c = (uIntf*)ZALLOC(z, 288, sizeof(uInt))) == Z_NULL)
      return Z_MEM_ERROR;
    if ((v = (uIntf*)ZALLOC(z, 288, sizeof(uInt))) == Z_NULL)
    {
      ZFREE(z, c);
      return Z_MEM_ERROR;
    }

    /* literal table: fixed code lengths per the deflate spec */
    for (k = 0; k < 144; k++)
      c[k] = 8;
    for (; k < 256; k++)
      c[k] = 9;
    for (; k < 280; k++)
      c[k] = 7;
    for (; k < 288; k++)
      c[k] = 8;
    fixed_bl = 9;
    huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl,
               fixed_mem, &f, v);

    /* distance table: all 30 codes are 5 bits */
    for (k = 0; k < 30; k++)
      c[k] = 5;
    fixed_bd = 5;
    huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd,
               fixed_mem, &f, v);

    /* done */
    ZFREE(z, v);
    ZFREE(z, c);
    fixed_built = 1;
  }
#endif
  *bl = fixed_bl;
  *bd = fixed_bd;
  *tl = fixed_tl;
  *td = fixed_td;
  return Z_OK;
}
202565.c
/* * Toshiba rbtx4927 specific setup * * Author: MontaVista Software, Inc. * source@mvista.com * * Copyright 2001-2002 MontaVista Software Inc. * * Copyright (C) 1996, 97, 2001, 04 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2000 RidgeRun, Inc. * Author: RidgeRun, Inc. * glonnon@ridgerun.com, skranz@ridgerun.com, stevej@ridgerun.com * * Copyright 2001 MontaVista Software Inc. * Author: jsun@mvista.com or jsun@junsun.net * * Copyright 2002 MontaVista Software Inc. * Author: Michael Pruznick, michael_pruznick@mvista.com * * Copyright (C) 2000-2001 Toshiba Corporation * * Copyright (C) 2004 MontaVista Software Inc. * Author: Manish Lachwani, mlachwani@mvista.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <asm/io.h>
#include <asm/reboot.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/pci.h>
#include <asm/txx9/rbtx4927.h>
#include <asm/txx9/tx4938.h>	/* for TX4937 */

#ifdef CONFIG_PCI
/*
 * Register and initialize the primary PCI controller of a TX4927 SoC:
 * reset the PCI bus and PCIC, select 66MHz PCI clock when strapped or
 * requested, and retry at 66MHz if CLK_AUTO probing succeeds.
 */
static void __init tx4927_pci_setup(void)
{
	int extarb = !(__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCIARB);
	struct pci_controller *c = &txx9_primary_pcic;

	register_pci_controller(c);
	if (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66)
		txx9_pci_option =
			(txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
			TXX9_PCI_OPT_CLK_66; /* already configured */

	/* Reset PCI Bus */
	writeb(1, rbtx4927_pcireset_addr);
	/* Reset PCIC */
	txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_66)
		tx4927_pciclk66_setup();
	mdelay(10);
	/* clear PCIC reset */
	txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
	writeb(0, rbtx4927_pcireset_addr);
	iob();

	tx4927_report_pciclk();
	tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_AUTO &&
	    txx9_pci66_check(c, 0, 0)) {
		/* 66MHz works: repeat the reset sequence at 66MHz */
		/* Reset PCI Bus */
		writeb(1, rbtx4927_pcireset_addr);
		/* Reset PCIC */
		txx9_set64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
		tx4927_pciclk66_setup();
		mdelay(10);
		/* clear PCIC reset */
		txx9_clear64(&tx4927_ccfgptr->clkctr, TX4927_CLKCTR_PCIRST);
		writeb(0, rbtx4927_pcireset_addr);
		iob();
		/* Reinitialize PCIC */
		tx4927_report_pciclk();
		tx4927_pcic_setup(tx4927_pcicptr, c, extarb);
	}
	tx4927_setup_pcierr_irq();
}

/*
 * Same PCI bring-up sequence as tx4927_pci_setup(), but using the
 * TX4938 register block (the TX4937 shares the TX4938 layout).
 */
static void __init tx4937_pci_setup(void)
{
	int extarb = !(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB);
	struct pci_controller *c = &txx9_primary_pcic;

	register_pci_controller(c);
	if (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66)
		txx9_pci_option =
			(txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
			TXX9_PCI_OPT_CLK_66; /* already configured */

	/* Reset PCI Bus */
	writeb(1, rbtx4927_pcireset_addr);
	/* Reset PCIC */
	txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_66)
		tx4938_pciclk66_setup();
	mdelay(10);
	/* clear PCIC reset */
	txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
	writeb(0, rbtx4927_pcireset_addr);
	iob();

	tx4938_report_pciclk();
	tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
	if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
	    TXX9_PCI_OPT_CLK_AUTO &&
	    txx9_pci66_check(c, 0, 0)) {
		/* 66MHz works: repeat the reset sequence at 66MHz */
		/* Reset PCI Bus */
		writeb(1, rbtx4927_pcireset_addr);
		/* Reset PCIC */
		txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
		tx4938_pciclk66_setup();
		mdelay(10);
		/* clear PCIC reset */
		txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
		writeb(0, rbtx4927_pcireset_addr);
		iob();
		/* Reinitialize PCIC */
		tx4938_report_pciclk();
		tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
	}
	tx4938_setup_pcierr_irq();
}

static void __init rbtx4927_arch_init(void)
{
	tx4927_pci_setup();
}

static void __init rbtx4937_arch_init(void)
{
	tx4937_pci_setup();
}
#else
#define rbtx4927_arch_init NULL
#define rbtx4937_arch_init NULL
#endif /* CONFIG_PCI */

/*
 * Board restart hook: trigger the RBTX4927 software reset register.
 * Falls back to halting the machine if the reset does not take effect.
 */
static void toshiba_rbtx4927_restart(char *command)
{
	/* enable the s/w reset register */
	writeb(1, rbtx4927_softresetlock_addr);

	/* wait for enable to be seen */
	while (!(readb(rbtx4927_softresetlock_addr) & 1))
		;

	/* do a s/w reset */
	writeb(1, rbtx4927_softreset_addr);

	/* fallback */
	(*_machine_halt)();
}

static void __init rbtx4927_clock_init(void);
static void __init rbtx4937_clock_init(void);

/*
 * Early memory/board setup: pick the TX4927 or TX4937 code path by
 * probing the processor code, install the restart hook, set up the PCI
 * controller (or ISA I/O base without PCI), and bring up the console
 * serial port.
 */
static void __init rbtx4927_mem_setup(void)
{
	char *argptr;

	if (TX4927_REV_PCODE() == 0x4927) {
		rbtx4927_clock_init();
		tx4927_setup();
	} else {
		rbtx4937_clock_init();
		tx4938_setup();
	}

	_machine_restart = toshiba_rbtx4927_restart;

#ifdef CONFIG_PCI
	txx9_alloc_pci_controller(&txx9_primary_pcic,
				  RBTX4927_PCIMEM, RBTX4927_PCIMEM_SIZE,
				  RBTX4927_PCIIO, RBTX4927_PCIIO_SIZE);
	txx9_board_pcibios_setup = tx4927_pcibios_setup;
#else
	set_io_port_base(KSEG1 + RBTX4927_ISA_IO_OFFSET);
#endif

	/* TX4927-SIO DTR on (PIO[15]) */
	gpio_request(15, "sio-dtr");
	gpio_direction_output(15, 1);

	tx4927_sio_init(0, 0);
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
	argptr = prom_getcmdline();
	if (!strstr(argptr, "console="))
		strcat(argptr, " console=ttyS0,38400");
#endif
}

/* Derive the TX4927 CPU clock from the PCIDIVMODE configuration bits. */
static void __init rbtx4927_clock_init(void)
{
	/*
	 * ASSUMPTION: PCIDIVMODE is configured for PCI 33MHz or 66MHz.
	 *
	 * For TX4927:
	 * PCIDIVMODE[12:11]'s initial value is given by S9[4:3] (ON:0, OFF:1).
	 * CPU 166MHz: PCI 66MHz : PCIDIVMODE: 00 (1/2.5)
	 * CPU 200MHz: PCI 66MHz : PCIDIVMODE: 01 (1/3)
	 * CPU 166MHz: PCI 33MHz : PCIDIVMODE: 10 (1/5)
	 * CPU 200MHz: PCI 33MHz : PCIDIVMODE: 11 (1/6)
	 * i.e. S9[3]: ON (83MHz), OFF (100MHz)
	 */
	switch ((unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg) &
		TX4927_CCFG_PCIDIVMODE_MASK) {
	case TX4927_CCFG_PCIDIVMODE_2_5:
	case TX4927_CCFG_PCIDIVMODE_5:
		txx9_cpu_clock = 166666666;	/* 166MHz */
		break;
	default:
		txx9_cpu_clock = 200000000;	/* 200MHz */
	}
}

/* Derive the TX4937 CPU clock from the PCIDIVMODE configuration bits. */
static void __init rbtx4937_clock_init(void)
{
	/*
	 * ASSUMPTION: PCIDIVMODE is configured for PCI 33MHz or 66MHz.
	 *
	 * For TX4937:
	 * PCIDIVMODE[12:11]'s initial value is given by S1[5:4] (ON:0, OFF:1)
	 * PCIDIVMODE[10] is 0.
	 * CPU 266MHz: PCI 33MHz : PCIDIVMODE: 000 (1/8)
	 * CPU 266MHz: PCI 66MHz : PCIDIVMODE: 001 (1/4)
	 * CPU 300MHz: PCI 33MHz : PCIDIVMODE: 010 (1/9)
	 * CPU 300MHz: PCI 66MHz : PCIDIVMODE: 011 (1/4.5)
	 * CPU 333MHz: PCI 33MHz : PCIDIVMODE: 100 (1/10)
	 * CPU 333MHz: PCI 66MHz : PCIDIVMODE: 101 (1/5)
	 */
	switch ((unsigned long)__raw_readq(&tx4938_ccfgptr->ccfg) &
		TX4938_CCFG_PCIDIVMODE_MASK) {
	case TX4938_CCFG_PCIDIVMODE_8:
	case TX4938_CCFG_PCIDIVMODE_4:
		txx9_cpu_clock = 266666666;	/* 266MHz */
		break;
	case TX4938_CCFG_PCIDIVMODE_9:
	case TX4938_CCFG_PCIDIVMODE_4_5:
		txx9_cpu_clock = 300000000;	/* 300MHz */
		break;
	default:
		txx9_cpu_clock = 333333333;	/* 333MHz */
	}
}

static void __init rbtx4927_time_init(void)
{
	tx4927_time_init(0);
}

/* Register the on-board DS1742 battery-backed RTC/NVRAM device. */
static void __init toshiba_rbtx4927_rtc_init(void)
{
	struct resource res = {
		.start	= RBTX4927_BRAMRTC_BASE - IO_BASE,
		.end	= RBTX4927_BRAMRTC_BASE - IO_BASE + 0x800 - 1,
		.flags	= IORESOURCE_MEM,
	};
	platform_device_register_simple("rtc-ds1742", -1, &res, 1);
}

/* Register the on-board RTL8019 NE2000-compatible Ethernet device. */
static void __init rbtx4927_ne_init(void)
{
	struct resource res[] = {
		{
			.start	= RBTX4927_RTL_8019_BASE,
			.end	= RBTX4927_RTL_8019_BASE + 0x20 - 1,
			.flags	= IORESOURCE_IO,
		}, {
			.start	= RBTX4927_RTL_8019_IRQ,
			.flags	= IORESOURCE_IRQ,
		}
	};
	platform_device_register_simple("ne", -1, res, ARRAY_SIZE(res));
}

/* Register both flash chip-select regions with the MTD layer. */
static void __init rbtx4927_mtd_init(void)
{
	int i;

	for (i = 0; i < 2; i++)
		tx4927_mtd_init(i);
}

/* Register the two PIO-driven (active-low) green LEDs via leds-gpio. */
static void __init rbtx4927_gpioled_init(void)
{
	static struct gpio_led leds[] = {
		{ .name = "gpioled:green:0", .gpio = 0, .active_low = 1, },
		{ .name = "gpioled:green:1", .gpio = 1, .active_low = 1, },
	};
	static struct gpio_led_platform_data pdata = {
		.num_leds = ARRAY_SIZE(leds),
		.leds = leds,
	};
	struct platform_device *pdev = platform_device_alloc("leds-gpio", 0);

	if (!pdev)
		return;
	pdev->dev.platform_data = &pdata;
	if (platform_device_add(pdev))
		platform_device_put(pdev);
}

/* Register all board peripheral devices (RTC, NIC, WDT, MTD, DMA, AC97,
 * LEDs), choosing SoC-specific initializers by processor code. */
static void __init rbtx4927_device_init(void)
{
	toshiba_rbtx4927_rtc_init();
	rbtx4927_ne_init();
	tx4927_wdt_init();
	rbtx4927_mtd_init();
	if (TX4927_REV_PCODE() == 0x4927) {
		tx4927_dmac_init(2);
		tx4927_aclc_init(0, 1);
	} else {
		tx4938_dmac_init(0, 2);
		tx4938_aclc_init();
	}
	platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
	txx9_iocled_init(RBTX4927_LED_ADDR - IO_BASE, -1, 3, 1, "green", NULL);
	rbtx4927_gpioled_init();
}

/* Board description vector for the RBTX4927. */
struct txx9_board_vec rbtx4927_vec __initdata = {
	.system = "Toshiba RBTX4927",
	.prom_init = rbtx4927_prom_init,
	.mem_setup = rbtx4927_mem_setup,
	.irq_setup = rbtx4927_irq_setup,
	.time_init = rbtx4927_time_init,
	.device_init = rbtx4927_device_init,
	.arch_init = rbtx4927_arch_init,
#ifdef CONFIG_PCI
	.pci_map_irq = rbtx4927_pci_map_irq,
#endif
};

/* Board description vector for the RBTX4937 (shares most hooks). */
struct txx9_board_vec rbtx4937_vec __initdata = {
	.system = "Toshiba RBTX4937",
	.prom_init = rbtx4927_prom_init,
	.mem_setup = rbtx4927_mem_setup,
	.irq_setup = rbtx4927_irq_setup,
	.time_init = rbtx4927_time_init,
	.device_init = rbtx4927_device_init,
	.arch_init = rbtx4937_arch_init,
#ifdef CONFIG_PCI
	.pci_map_irq = rbtx4927_pci_map_irq,
#endif
};
898616.c
/****************************************************************************
 * arch/arm/src/kl/kl_dumpgpio.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

/* Output debug info even if debug output is not selected. */

#undef  CONFIG_DEBUG_INFO
#define CONFIG_DEBUG_INFO 1

#include <sys/types.h>
#include <assert.h>
#include <debug.h>

#include <nuttx/irq.h>

#include "arm_internal.h"
#include "chip.h"
#include "kl_gpio.h"

#ifdef CONFIG_DEBUG_FEATURES

/****************************************************************************
 * Private Data
 ****************************************************************************/

/* Port letters for prettier debug output.  The chain below selects an
 * initializer sized exactly to KL_GPIO_NPORTS.
 */

static const char g_portchar[KL_GPIO_NPORTS] =
{
#if KL_GPIO_NPORTS > 9
#  error "Additional support required for this number of GPIOs"
#elif KL_GPIO_NPORTS > 8
  'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'
#elif KL_GPIO_NPORTS > 7
  'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'
#elif KL_GPIO_NPORTS > 6
  'A', 'B', 'C', 'D', 'E', 'F', 'G'
#elif KL_GPIO_NPORTS > 5
  'A', 'B', 'C', 'D', 'E', 'F'
#elif KL_GPIO_NPORTS > 4
  'A', 'B', 'C', 'D', 'E'
#elif KL_GPIO_NPORTS > 3
  'A', 'B', 'C', 'D'
#elif KL_GPIO_NPORTS > 2
  'A', 'B', 'C'
#elif KL_GPIO_NPORTS > 1
  'A', 'B'
#elif KL_GPIO_NPORTS > 0
  'A'
#else
#  error "Bad number of GPIOs"
#endif
};

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Function:  kl_dumpgpio
 *
 * Description:
 *   Dump all GPIO registers associated with the provided pin description
 *   along with a descriptive message.
 *
 * Input Parameters:
 *   pinset - Pin configuration; only the port field is decoded here.
 *   msg    - Caller-supplied text appended to the debug output.
 *
 ****************************************************************************/

void kl_dumpgpio(gpio_cfgset_t pinset, const char *msg)
{
  irqstate_t flags;
  uintptr_t base;
  int port;

  /* Decode the port and pin.  Use the port number to get the GPIO base
   * address.
   */

  port = (pinset & _PIN_PORT_MASK) >> _PIN_PORT_SHIFT;
  DEBUGASSERT((unsigned)port < KL_GPIO_NPORTS);
  base = KL_GPIO_BASE(port);

  /* The following requires exclusive access to the GPIO registers */

  flags = enter_critical_section();

  _info("GPIO%c pinset: %08x base: %08x -- %s\n",
        g_portchar[port], pinset, base, msg);
  _info(" PDOR: %08x PDIR: %08x PDDR: %08x\n",
        getreg32(base + KL_GPIO_PDOR_OFFSET),
        getreg32(base + KL_GPIO_PDIR_OFFSET),
        getreg32(base + KL_GPIO_PDDR_OFFSET));

  leave_critical_section(flags);
}

#endif /* CONFIG_DEBUG_FEATURES */
473915.c
/* Software triangle rasterizers (perspective-correct, fixed-point).
 *
 * Common scheme for all variants below:
 *   - Each vertex Z is replaced by its reciprocal scaled by ZBUFFER_SCALE,
 *     so larger stored values are nearer the viewer.
 *   - U/V are pre-multiplied by that reciprocal Z (then >> ZDIVIDE_SHIFT),
 *     so interpolating them linearly across the scanline and dividing by the
 *     interpolated Z per pixel yields perspective-correct texel coordinates.
 *   - setup_triangle_3()/setup_triangle_1() produce per-scanline start/end X
 *     plus interpolated parameter values; dp[] are the per-pixel deltas.
 *   - The Z-buffer test ORs viewport_priority into the depth value, so
 *     higher-priority viewports always win regardless of geometry depth.
 *
 * Variant differences worth noting (appear deliberate -- confirm):
 *   - Opaque variants (tex1555, tex4444, color) test "iz > d[x]" and write
 *     the new depth; the _trans/_alpha variants never write d[x] (translucent
 *     pixels leave depth unchanged), and the _alpha variants test ">=".
 *
 * NOTE(review): in every function the "#else" (BILINEAR == 0) path uses
 * pix/r/g/b whose declarations are commented out at the top of the inner
 * loop, so the code would not compile with BILINEAR disabled.
 */

/* Textured, opaque, ARGB1555 texels.  Writes color and depth. */
static void draw_triangle_tex1555(VERTEX v1, VERTEX v2, VERTEX v3)
{
    int x, y;
    struct poly_vertex vert[3];
    const struct poly_scanline_data *scans;

    /* Reciprocal Z and perspective pre-division of U/V (see file comment). */
    v1.z = (1.0 / v1.z) * ZBUFFER_SCALE;
    v2.z = (1.0 / v2.z) * ZBUFFER_SCALE;
    v3.z = (1.0 / v3.z) * ZBUFFER_SCALE;
    v1.u = (UINT32)((UINT64)(v1.u * v1.z) >> ZDIVIDE_SHIFT);
    v1.v = (UINT32)((UINT64)(v1.v * v1.z) >> ZDIVIDE_SHIFT);
    v2.u = (UINT32)((UINT64)(v2.u * v2.z) >> ZDIVIDE_SHIFT);
    v2.v = (UINT32)((UINT64)(v2.v * v2.z) >> ZDIVIDE_SHIFT);
    v3.u = (UINT32)((UINT64)(v3.u * v3.z) >> ZDIVIDE_SHIFT);
    v3.v = (UINT32)((UINT64)(v3.v * v3.z) >> ZDIVIDE_SHIFT);

    /* Pack the three vertices for the scanline setup: p[0]=Z, p[1]=U, p[2]=V. */
    vert[0].x = v1.x; vert[0].y = v1.y; vert[0].p[0] = (UINT32)v1.z; vert[0].p[1] = v1.u; vert[0].p[2] = v1.v;
    vert[1].x = v2.x; vert[1].y = v2.y; vert[1].p[0] = (UINT32)v2.z; vert[1].p[1] = v2.u; vert[1].p[2] = v2.v;
    vert[2].x = v3.x; vert[2].y = v3.y; vert[2].p[0] = (UINT32)v3.z; vert[2].p[1] = v3.u; vert[2].p[2] = v3.v;

    scans = setup_triangle_3(&vert[0], &vert[1], &vert[2], &clip3d);

    if(scans)
    {
        INT64 du, dv, dz;
        dz = scans->dp[0];
        du = scans->dp[1];
        dv = scans->dp[2];
        for(y = scans->sy; y <= scans->ey; y++)
        {
            int x1, x2;
            INT64 u, v, z;
            INT64 u2, v2;
            const struct poly_scanline *scan = &scans->scanline[y - scans->sy];
            UINT16 *p = BITMAP_ADDR16(bitmap3d, y, 0);
            UINT32 *d = BITMAP_ADDR32(zbuffer, y, 0);
            x1 = scan->sx;
            x2 = scan->ex;
            z = scans->scanline[y - scans->sy].p[0];
            u = scans->scanline[y - scans->sy].p[1];
            v = scans->scanline[y - scans->sy].p[2];
            for(x = x1; x <= x2; x++)
            {
//              UINT16 pix;
                int iu, iv;
                UINT32 iz = z >> 16;
                /* Per-pixel perspective divide recovers true U/V. */
                if (iz) {
                    u2 = (u << ZDIVIDE_SHIFT) / iz;
                    v2 = (v << ZDIVIDE_SHIFT) / iz;
                } else {
                    u2 = 0;
                    v2 = 0;
                }
                iz |= viewport_priority;
                if(iz > d[x])
                {
                    iu = texture_u_table[(u2 >> texture_coord_shift) & texture_width_mask];
                    iv = texture_v_table[(v2 >> texture_coord_shift) & texture_height_mask];
#if BILINEAR
                    {
                        /* Neighboring texels for the bilinear filter. */
                        int iu2 = texture_u_table[((u2 >> texture_coord_shift) + 1) & texture_width_mask];
                        int iv2 = texture_v_table[((v2 >> texture_coord_shift) + 1) & texture_height_mask];
                        UINT32 sr[4], sg[4], sb[4];
                        UINT32 ur[2], ug[2], ub[2];
                        UINT32 fr, fg, fb;
                        UINT16 pix0 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                        UINT16 pix1 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu2)];
                        UINT16 pix2 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu)];
                        UINT16 pix3 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu2)];
                        /* 16.16 fractional weights for the filter. */
                        int u_sub1 = (u2 >> (texture_coord_shift-16)) & 0xffff;
                        int v_sub1 = (v2 >> (texture_coord_shift-16)) & 0xffff;
                        int u_sub0 = 0xffff - u_sub1;
                        int v_sub0 = 0xffff - v_sub1;
                        /* Split the four xRGB1555 texels into channels. */
                        sr[0] = (pix0 & 0x7c00); sg[0] = (pix0 & 0x03e0); sb[0] = (pix0 & 0x001f);
                        sr[1] = (pix1 & 0x7c00); sg[1] = (pix1 & 0x03e0); sb[1] = (pix1 & 0x001f);
                        sr[2] = (pix2 & 0x7c00); sg[2] = (pix2 & 0x03e0); sb[2] = (pix2 & 0x001f);
                        sr[3] = (pix3 & 0x7c00); sg[3] = (pix3 & 0x03e0); sb[3] = (pix3 & 0x001f);
                        /* Calculate weighted U-samples */
                        ur[0] = (((sr[0] * u_sub0) >> 16) + ((sr[1] * u_sub1) >> 16));
                        ug[0] = (((sg[0] * u_sub0) >> 16) + ((sg[1] * u_sub1) >> 16));
                        ub[0] = (((sb[0] * u_sub0) >> 16) + ((sb[1] * u_sub1) >> 16));
                        ur[1] = (((sr[2] * u_sub0) >> 16) + ((sr[3] * u_sub1) >> 16));
                        ug[1] = (((sg[2] * u_sub0) >> 16) + ((sg[3] * u_sub1) >> 16));
                        ub[1] = (((sb[2] * u_sub0) >> 16) + ((sb[3] * u_sub1) >> 16));
                        /* Calculate the final sample */
                        fr = (((ur[0] * v_sub0) >> 16) + ((ur[1] * v_sub1) >> 16));
                        fg = (((ug[0] * v_sub0) >> 16) + ((ug[1] * v_sub1) >> 16));
                        fb = (((ub[0] * v_sub0) >> 16) + ((ub[1] * v_sub1) >> 16));
                        // apply intensity
                        fr = (fr * polygon_intensity) >> 8;
                        fg = (fg * polygon_intensity) >> 8;
                        fb = (fb * polygon_intensity) >> 8;
                        p[x] = (fr & 0x7c00) | (fg & 0x3e0) | (fb & 0x1f);
                    }
#else
                    /* Point-sampled path; see NOTE(review) above about pix. */
                    pix = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                    p[x] = pix & 0x7fff;
#endif
                    d[x] = iz;      /* write new zbuffer value */
                }
                z += dz;
                u += du;
                v += dv;
            }
        }
    }
}

/* Textured ARGB1555, translucent: blends with the framebuffer using the
 * global polygon_transparency (0..31).  Depth is tested but never written.
 */
static void draw_triangle_tex1555_trans(VERTEX v1, VERTEX v2, VERTEX v3)
{
    int x, y;
    struct poly_vertex vert[3];
    const struct poly_scanline_data *scans;

    v1.z = (1.0 / v1.z) * ZBUFFER_SCALE;
    v2.z = (1.0 / v2.z) * ZBUFFER_SCALE;
    v3.z = (1.0 / v3.z) * ZBUFFER_SCALE;
    v1.u = (UINT32)((UINT64)(v1.u * v1.z) >> ZDIVIDE_SHIFT);
    v1.v = (UINT32)((UINT64)(v1.v * v1.z) >> ZDIVIDE_SHIFT);
    v2.u = (UINT32)((UINT64)(v2.u * v2.z) >> ZDIVIDE_SHIFT);
    v2.v = (UINT32)((UINT64)(v2.v * v2.z) >> ZDIVIDE_SHIFT);
    v3.u = (UINT32)((UINT64)(v3.u * v3.z) >> ZDIVIDE_SHIFT);
    v3.v = (UINT32)((UINT64)(v3.v * v3.z) >> ZDIVIDE_SHIFT);

    vert[0].x = v1.x; vert[0].y = v1.y; vert[0].p[0] = (UINT32)v1.z; vert[0].p[1] = v1.u; vert[0].p[2] = v1.v;
    vert[1].x = v2.x; vert[1].y = v2.y; vert[1].p[0] = (UINT32)v2.z; vert[1].p[1] = v2.u; vert[1].p[2] = v2.v;
    vert[2].x = v3.x; vert[2].y = v3.y; vert[2].p[0] = (UINT32)v3.z; vert[2].p[1] = v3.u; vert[2].p[2] = v3.v;

    scans = setup_triangle_3(&vert[0], &vert[1], &vert[2], &clip3d);

    if(scans)
    {
        INT64 du, dv, dz;
        dz = scans->dp[0];
        du = scans->dp[1];
        dv = scans->dp[2];
        for(y = scans->sy; y <= scans->ey; y++)
        {
            int x1, x2;
            INT64 u, v, z;
            INT64 u2, v2;
            const struct poly_scanline *scan = &scans->scanline[y - scans->sy];
            UINT16 *p = BITMAP_ADDR16(bitmap3d, y, 0);
            UINT32 *d = BITMAP_ADDR32(zbuffer, y, 0);
            x1 = scan->sx;
            x2 = scan->ex;
            z = scans->scanline[y - scans->sy].p[0];
            u = scans->scanline[y - scans->sy].p[1];
            v = scans->scanline[y - scans->sy].p[2];
            for(x = x1; x <= x2; x++)
            {
//              UINT16 pix;
                int iu, iv;
                UINT32 iz = z >> 16;
                if (iz) {
                    u2 = (u << ZDIVIDE_SHIFT) / iz;
                    v2 = (v << ZDIVIDE_SHIFT) / iz;
                } else {
                    u2 = 0;
                    v2 = 0;
                }
                iz |= viewport_priority;
                if(iz > d[x])
                {
                    iu = texture_u_table[(u2 >> texture_coord_shift) & texture_width_mask];
                    iv = texture_v_table[(v2 >> texture_coord_shift) & texture_height_mask];
#if BILINEAR
                    {
                        int iu2 = texture_u_table[((u2 >> texture_coord_shift) + 1) & texture_width_mask];
                        int iv2 = texture_v_table[((v2 >> texture_coord_shift) + 1) & texture_height_mask];
                        UINT32 sr[4], sg[4], sb[4];
                        UINT32 ur[2], ug[2], ub[2];
                        UINT32 fr, fg, fb;
                        UINT32 pr, pg, pb;
                        UINT16 pix0 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                        UINT16 pix1 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu2)];
                        UINT16 pix2 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu)];
                        UINT16 pix3 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu2)];
                        int u_sub1 = (u2 >> (texture_coord_shift-16)) & 0xffff;
                        int v_sub1 = (v2 >> (texture_coord_shift-16)) & 0xffff;
                        int u_sub0 = 0xffff - u_sub1;
                        int v_sub0 = 0xffff - v_sub1;
                        sr[0] = (pix0 & 0x7c00); sg[0] = (pix0 & 0x03e0); sb[0] = (pix0 & 0x001f);
                        sr[1] = (pix1 & 0x7c00); sg[1] = (pix1 & 0x03e0); sb[1] = (pix1 & 0x001f);
                        sr[2] = (pix2 & 0x7c00); sg[2] = (pix2 & 0x03e0); sb[2] = (pix2 & 0x001f);
                        sr[3] = (pix3 & 0x7c00); sg[3] = (pix3 & 0x03e0); sb[3] = (pix3 & 0x001f);
                        /* Calculate weighted U-samples */
                        ur[0] = (((sr[0] * u_sub0) >> 16) + ((sr[1] * u_sub1) >> 16));
                        ug[0] = (((sg[0] * u_sub0) >> 16) + ((sg[1] * u_sub1) >> 16));
                        ub[0] = (((sb[0] * u_sub0) >> 16) + ((sb[1] * u_sub1) >> 16));
                        ur[1] = (((sr[2] * u_sub0) >> 16) + ((sr[3] * u_sub1) >> 16));
                        ug[1] = (((sg[2] * u_sub0) >> 16) + ((sg[3] * u_sub1) >> 16));
                        ub[1] = (((sb[2] * u_sub0) >> 16) + ((sb[3] * u_sub1) >> 16));
                        /* Calculate the final sample */
                        fr = (((ur[0] * v_sub0) >> 16) + ((ur[1] * v_sub1) >> 16));
                        fg = (((ug[0] * v_sub0) >> 16) + ((ug[1] * v_sub1) >> 16));
                        fb = (((ub[0] * v_sub0) >> 16) + ((ub[1] * v_sub1) >> 16));
                        // apply intensity
                        fr = (fr * polygon_intensity) >> 8;
                        fg = (fg * polygon_intensity) >> 8;
                        fb = (fb * polygon_intensity) >> 8;
                        /* Blend with existing framebuffer pixels */
                        pr = (p[x] & 0x7c00);
                        pg = (p[x] & 0x03e0);
                        pb = (p[x] & 0x001f);
                        fr = ((pr * (31-polygon_transparency)) + (fr * polygon_transparency)) >> 5;
                        fg = ((pg * (31-polygon_transparency)) + (fg * polygon_transparency)) >> 5;
                        fb = ((pb * (31-polygon_transparency)) + (fb * polygon_transparency)) >> 5;
                        p[x] = (fr & 0x7c00) | (fg & 0x3e0) | (fb & 0x1f);
                    }
#else
                    pix = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                    p[x] = pix & 0x7fff;
#endif
                    /* No d[x] write: translucent pixels leave depth unchanged. */
                }
                z += dz;
                u += du;
                v += dv;
            }
        }
    }
}

/* Textured ARGB1555 with per-texel alpha: texel bit 15 selects transparent
 * (alpha 0) vs opaque (alpha 16); the bilinear filter interpolates alpha so
 * texel edges blend smoothly.  Depth test is ">=" and depth is not written.
 */
static void draw_triangle_tex1555_alpha(VERTEX v1, VERTEX v2, VERTEX v3)
{
    int x, y;
    struct poly_vertex vert[3];
    const struct poly_scanline_data *scans;

    v1.z = (1.0 / v1.z) * ZBUFFER_SCALE;
    v2.z = (1.0 / v2.z) * ZBUFFER_SCALE;
    v3.z = (1.0 / v3.z) * ZBUFFER_SCALE;
    v1.u = (UINT32)((UINT64)(v1.u * v1.z) >> ZDIVIDE_SHIFT);
    v1.v = (UINT32)((UINT64)(v1.v * v1.z) >> ZDIVIDE_SHIFT);
    v2.u = (UINT32)((UINT64)(v2.u * v2.z) >> ZDIVIDE_SHIFT);
    v2.v = (UINT32)((UINT64)(v2.v * v2.z) >> ZDIVIDE_SHIFT);
    v3.u = (UINT32)((UINT64)(v3.u * v3.z) >> ZDIVIDE_SHIFT);
    v3.v = (UINT32)((UINT64)(v3.v * v3.z) >> ZDIVIDE_SHIFT);

    vert[0].x = v1.x; vert[0].y = v1.y; vert[0].p[0] = (UINT32)v1.z; vert[0].p[1] = v1.u; vert[0].p[2] = v1.v;
    vert[1].x = v2.x; vert[1].y = v2.y; vert[1].p[0] = (UINT32)v2.z; vert[1].p[1] = v2.u; vert[1].p[2] = v2.v;
    vert[2].x = v3.x; vert[2].y = v3.y; vert[2].p[0] = (UINT32)v3.z; vert[2].p[1] = v3.u; vert[2].p[2] = v3.v;

    scans = setup_triangle_3(&vert[0], &vert[1], &vert[2], &clip3d);

    if(scans)
    {
        INT64 du, dv, dz;
        dz = scans->dp[0];
        du = scans->dp[1];
        dv = scans->dp[2];
        for(y = scans->sy; y <= scans->ey; y++)
        {
            int x1, x2;
            INT64 u, v, z;
            INT64 u2, v2;
            const struct poly_scanline *scan = &scans->scanline[y - scans->sy];
            UINT16 *p = BITMAP_ADDR16(bitmap3d, y, 0);
            UINT32 *d = BITMAP_ADDR32(zbuffer, y, 0);
            x1 = scan->sx;
            x2 = scan->ex;
            z = scans->scanline[y - scans->sy].p[0];
            u = scans->scanline[y - scans->sy].p[1];
            v = scans->scanline[y - scans->sy].p[2];
            for(x = x1; x <= x2; x++)
            {
//              UINT16 pix;
                int iu, iv;
                UINT32 iz = z >> 16;
                if (iz) {
                    u2 = (u << ZDIVIDE_SHIFT) / iz;
                    v2 = (v << ZDIVIDE_SHIFT) / iz;
                } else {
                    u2 = 0;
                    v2 = 0;
                }
                iz |= viewport_priority;
                /* ">=" here (vs ">" in the opaque variants). */
                if(iz >= d[x])
                {
                    iu = texture_u_table[(u2 >> texture_coord_shift) & texture_width_mask];
                    iv = texture_v_table[(v2 >> texture_coord_shift) & texture_height_mask];
#if BILINEAR
                    {
                        int iu2 = texture_u_table[((u2 >> texture_coord_shift) + 1) & texture_width_mask];
                        int iv2 = texture_v_table[((v2 >> texture_coord_shift) + 1) & texture_height_mask];
                        /* NOTE(review): ua is declared [4] but only [0] and
                         * [1] are used (matches ur/ug/ub which are [2]).
                         */
                        UINT32 sr[4], sg[4], sb[4], sa[4];
                        UINT32 ur[2], ug[2], ub[2], ua[4];
                        UINT32 fr, fg, fb, fa;
                        UINT32 pr, pg, pb;
                        UINT16 pix0 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                        UINT16 pix1 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu2)];
                        UINT16 pix2 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu)];
                        UINT16 pix3 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu2)];
                        int u_sub1 = (u2 >> (texture_coord_shift-16)) & 0xffff;
                        int v_sub1 = (v2 >> (texture_coord_shift-16)) & 0xffff;
                        int u_sub0 = 0xffff - u_sub1;
                        int v_sub0 = 0xffff - v_sub1;
                        /* Bit 15 set means transparent texel (alpha 0/16). */
                        sr[0] = (pix0 & 0x7c00); sg[0] = (pix0 & 0x03e0); sb[0] = (pix0 & 0x001f); sa[0] = (pix0 & 0x8000) ? 0 : 16;
                        sr[1] = (pix1 & 0x7c00); sg[1] = (pix1 & 0x03e0); sb[1] = (pix1 & 0x001f); sa[1] = (pix1 & 0x8000) ? 0 : 16;
                        sr[2] = (pix2 & 0x7c00); sg[2] = (pix2 & 0x03e0); sb[2] = (pix2 & 0x001f); sa[2] = (pix2 & 0x8000) ? 0 : 16;
                        sr[3] = (pix3 & 0x7c00); sg[3] = (pix3 & 0x03e0); sb[3] = (pix3 & 0x001f); sa[3] = (pix3 & 0x8000) ? 0 : 16;
                        /* Calculate weighted U-samples */
                        ur[0] = (((sr[0] * u_sub0) >> 16) + ((sr[1] * u_sub1) >> 16));
                        ug[0] = (((sg[0] * u_sub0) >> 16) + ((sg[1] * u_sub1) >> 16));
                        ub[0] = (((sb[0] * u_sub0) >> 16) + ((sb[1] * u_sub1) >> 16));
                        ua[0] = (((sa[0] * u_sub0) >> 16) + ((sa[1] * u_sub1) >> 16));
                        ur[1] = (((sr[2] * u_sub0) >> 16) + ((sr[3] * u_sub1) >> 16));
                        ug[1] = (((sg[2] * u_sub0) >> 16) + ((sg[3] * u_sub1) >> 16));
                        ub[1] = (((sb[2] * u_sub0) >> 16) + ((sb[3] * u_sub1) >> 16));
                        ua[1] = (((sa[2] * u_sub0) >> 16) + ((sa[3] * u_sub1) >> 16));
                        /* Calculate the final sample */
                        fr = (((ur[0] * v_sub0) >> 16) + ((ur[1] * v_sub1) >> 16));
                        fg = (((ug[0] * v_sub0) >> 16) + ((ug[1] * v_sub1) >> 16));
                        fb = (((ub[0] * v_sub0) >> 16) + ((ub[1] * v_sub1) >> 16));
                        fa = (((ua[0] * v_sub0) >> 16) + ((ua[1] * v_sub1) >> 16));
                        // apply intensity
                        fr = (fr * polygon_intensity) >> 8;
                        fg = (fg * polygon_intensity) >> 8;
                        fb = (fb * polygon_intensity) >> 8;
                        /* Blend with existing framebuffer pixels */
                        pr = (p[x] & 0x7c00);
                        pg = (p[x] & 0x03e0);
                        pb = (p[x] & 0x001f);
                        fr = ((pr * (16 - fa)) + (fr * fa)) >> 4;
                        fg = ((pg * (16 - fa)) + (fg * fa)) >> 4;
                        fb = ((pb * (16 - fa)) + (fb * fa)) >> 4;
                        p[x] = (fr & 0x7c00) | (fg & 0x3e0) | (fb & 0x1f);
                    }
#else
                    pix = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                    p[x] = pix & 0x7fff;
#endif
                }
                z += dz;
                u += du;
                v += dv;
            }
        }
    }
}

/* Textured, opaque, RGBA4444 texels.  Output is repacked from 4:4:4 to
 * 5:5:5 via the final shift/mask.  Writes color and depth.
 */
static void draw_triangle_tex4444(VERTEX v1, VERTEX v2, VERTEX v3)
{
    int x, y;
    struct poly_vertex vert[3];
    const struct poly_scanline_data *scans;

    v1.z = (1.0 / v1.z) * ZBUFFER_SCALE;
    v2.z = (1.0 / v2.z) * ZBUFFER_SCALE;
    v3.z = (1.0 / v3.z) * ZBUFFER_SCALE;
    v1.u = (UINT32)((UINT64)(v1.u * v1.z) >> ZDIVIDE_SHIFT);
    v1.v = (UINT32)((UINT64)(v1.v * v1.z) >> ZDIVIDE_SHIFT);
    v2.u = (UINT32)((UINT64)(v2.u * v2.z) >> ZDIVIDE_SHIFT);
    v2.v = (UINT32)((UINT64)(v2.v * v2.z) >> ZDIVIDE_SHIFT);
    v3.u = (UINT32)((UINT64)(v3.u * v3.z) >> ZDIVIDE_SHIFT);
    v3.v = (UINT32)((UINT64)(v3.v * v3.z) >> ZDIVIDE_SHIFT);

    vert[0].x = v1.x; vert[0].y = v1.y; vert[0].p[0] = (UINT32)v1.z; vert[0].p[1] = v1.u; vert[0].p[2] = v1.v;
    vert[1].x = v2.x; vert[1].y = v2.y; vert[1].p[0] = (UINT32)v2.z; vert[1].p[1] = v2.u; vert[1].p[2] = v2.v;
    vert[2].x = v3.x; vert[2].y = v3.y; vert[2].p[0] = (UINT32)v3.z; vert[2].p[1] = v3.u; vert[2].p[2] = v3.v;

    scans = setup_triangle_3(&vert[0], &vert[1], &vert[2], &clip3d);

    if(scans)
    {
        INT64 du, dv, dz;
        dz = scans->dp[0];
        du = scans->dp[1];
        dv = scans->dp[2];
        for(y = scans->sy; y <= scans->ey; y++)
        {
            int x1, x2;
            INT64 u, v, z;
            INT64 u2, v2;
            const struct poly_scanline *scan = &scans->scanline[y - scans->sy];
            UINT16 *p = BITMAP_ADDR16(bitmap3d, y, 0);
            UINT32 *d = BITMAP_ADDR32(zbuffer, y, 0);
            x1 = scan->sx;
            x2 = scan->ex;
            z = scans->scanline[y - scans->sy].p[0];
            u = scans->scanline[y - scans->sy].p[1];
            v = scans->scanline[y - scans->sy].p[2];
            for(x = x1; x <= x2; x++)
            {
//              UINT16 pix;
//              UINT16 r,g,b;
                int iu, iv;
                UINT32 iz = z >> 16;
                if (iz) {
                    u2 = (u << ZDIVIDE_SHIFT) / iz;
                    v2 = (v << ZDIVIDE_SHIFT) / iz;
                } else {
                    u2 = 0;
                    v2 = 0;
                }
                iz |= viewport_priority;
                if(iz > d[x])
                {
                    iu = texture_u_table[(u2 >> texture_coord_shift) & texture_width_mask];
                    iv = texture_v_table[(v2 >> texture_coord_shift) & texture_height_mask];
#if BILINEAR
                    {
                        int iu2 = texture_u_table[((u2 >> texture_coord_shift) + 1) & texture_width_mask];
                        int iv2 = texture_v_table[((v2 >> texture_coord_shift) + 1) & texture_height_mask];
                        UINT32 sr[4], sg[4], sb[4];
                        UINT32 ur[2], ug[2], ub[2];
                        UINT32 fr, fg, fb;
                        UINT16 pix0 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                        UINT16 pix1 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu2)];
                        UINT16 pix2 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu)];
                        UINT16 pix3 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu2)];
                        int u_sub1 = (u2 >> (texture_coord_shift-16)) & 0xffff;
                        int v_sub1 = (v2 >> (texture_coord_shift-16)) & 0xffff;
                        int u_sub0 = 0xffff - u_sub1;
                        int v_sub0 = 0xffff - v_sub1;
                        /* 4:4:4 channel masks (low nibble is alpha, unused here). */
                        sr[0] = (pix0 & 0xf000); sg[0] = (pix0 & 0x0f00); sb[0] = (pix0 & 0x00f0);
                        sr[1] = (pix1 & 0xf000); sg[1] = (pix1 & 0x0f00); sb[1] = (pix1 & 0x00f0);
                        sr[2] = (pix2 & 0xf000); sg[2] = (pix2 & 0x0f00); sb[2] = (pix2 & 0x00f0);
                        sr[3] = (pix3 & 0xf000); sg[3] = (pix3 & 0x0f00); sb[3] = (pix3 & 0x00f0);
                        /* Calculate weighted U-samples */
                        ur[0] = (((sr[0] * u_sub0) >> 16) + ((sr[1] * u_sub1) >> 16));
                        ug[0] = (((sg[0] * u_sub0) >> 16) + ((sg[1] * u_sub1) >> 16));
                        ub[0] = (((sb[0] * u_sub0) >> 16) + ((sb[1] * u_sub1) >> 16));
                        ur[1] = (((sr[2] * u_sub0) >> 16) + ((sr[3] * u_sub1) >> 16));
                        ug[1] = (((sg[2] * u_sub0) >> 16) + ((sg[3] * u_sub1) >> 16));
                        ub[1] = (((sb[2] * u_sub0) >> 16) + ((sb[3] * u_sub1) >> 16));
                        /* Calculate the final sample */
                        fr = (((ur[0] * v_sub0) >> 16) + ((ur[1] * v_sub1) >> 16));
                        fg = (((ug[0] * v_sub0) >> 16) + ((ug[1] * v_sub1) >> 16));
                        fb = (((ub[0] * v_sub0) >> 16) + ((ub[1] * v_sub1) >> 16));
                        // apply intensity
                        fr = (fr * polygon_intensity) >> 8;
                        fg = (fg * polygon_intensity) >> 8;
                        fb = (fb * polygon_intensity) >> 8;
                        /* Repack 4-bit channels into the 5:5:5 framebuffer. */
                        p[x] = ((fr & 0xf800) >> 1) | ((fg & 0x0f80) >> 2) | ((fb & 0x00f8) >> 3);
                    }
#else
                    pix = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                    r = (pix & 0xf000) >> 1;
                    g = (pix & 0x0f00) >> 2;
                    b = (pix & 0x00f0) >> 3;
                    p[x] = r | g | b;
#endif
                    d[x] = iz;      /* write new zbuffer value */
                }
                z += dz;
                u += du;
                v += dv;
            }
        }
    }
}

/* Textured RGBA4444 with per-texel 4-bit alpha, additionally scaled by the
 * global polygon_transparency.  Note this variant sums the weighted terms
 * BEFORE shifting (vs shift-then-add elsewhere), keeping more precision.
 * Depth test is ">=" and depth is not written.
 */
static void draw_triangle_tex4444_alpha(VERTEX v1, VERTEX v2, VERTEX v3)
{
    int x, y;
    struct poly_vertex vert[3];
    const struct poly_scanline_data *scans;

    v1.z = (1.0 / v1.z) * ZBUFFER_SCALE;
    v2.z = (1.0 / v2.z) * ZBUFFER_SCALE;
    v3.z = (1.0 / v3.z) * ZBUFFER_SCALE;
    v1.u = (UINT32)((UINT64)(v1.u * v1.z) >> ZDIVIDE_SHIFT);
    v1.v = (UINT32)((UINT64)(v1.v * v1.z) >> ZDIVIDE_SHIFT);
    v2.u = (UINT32)((UINT64)(v2.u * v2.z) >> ZDIVIDE_SHIFT);
    v2.v = (UINT32)((UINT64)(v2.v * v2.z) >> ZDIVIDE_SHIFT);
    v3.u = (UINT32)((UINT64)(v3.u * v3.z) >> ZDIVIDE_SHIFT);
    v3.v = (UINT32)((UINT64)(v3.v * v3.z) >> ZDIVIDE_SHIFT);

    vert[0].x = v1.x; vert[0].y = v1.y; vert[0].p[0] = (UINT32)v1.z; vert[0].p[1] = v1.u; vert[0].p[2] = v1.v;
    vert[1].x = v2.x; vert[1].y = v2.y; vert[1].p[0] = (UINT32)v2.z; vert[1].p[1] = v2.u; vert[1].p[2] = v2.v;
    vert[2].x = v3.x; vert[2].y = v3.y; vert[2].p[0] = (UINT32)v3.z; vert[2].p[1] = v3.u; vert[2].p[2] = v3.v;

    scans = setup_triangle_3(&vert[0], &vert[1], &vert[2], &clip3d);

    if(scans)
    {
        INT64 du, dv, dz;
        dz = scans->dp[0];
        du = scans->dp[1];
        dv = scans->dp[2];
        for(y = scans->sy; y <= scans->ey; y++)
        {
            int x1, x2;
            INT64 u, v, z;
            INT64 u2, v2;
            const struct poly_scanline *scan = &scans->scanline[y - scans->sy];
            UINT16 *p = BITMAP_ADDR16(bitmap3d, y, 0);
            UINT32 *d = BITMAP_ADDR32(zbuffer, y, 0);
            x1 = scan->sx;
            x2 = scan->ex;
            z = scans->scanline[y - scans->sy].p[0];
            u = scans->scanline[y - scans->sy].p[1];
            v = scans->scanline[y - scans->sy].p[2];
            for(x = x1; x <= x2; x++)
            {
//              UINT16 pix;
//              UINT16 r,g,b;
                int iu, iv;
                UINT32 iz = z >> 16;
                if (iz) {
                    u2 = (u << ZDIVIDE_SHIFT) / iz;
                    v2 = (v << ZDIVIDE_SHIFT) / iz;
                } else {
                    u2 = 0;
                    v2 = 0;
                }
                iz |= viewport_priority;
                if(iz >= d[x])
                {
                    iu = texture_u_table[(u2 >> texture_coord_shift) & texture_width_mask];
                    iv = texture_v_table[(v2 >> texture_coord_shift) & texture_height_mask];
#if BILINEAR
                    {
                        int iu2 = texture_u_table[((u2 >> texture_coord_shift) + 1) & texture_width_mask];
                        int iv2 = texture_v_table[((v2 >> texture_coord_shift) + 1) & texture_height_mask];
                        UINT32 sr[4], sg[4], sb[4], sa[4];
                        UINT32 ur[2], ug[2], ub[2], ua[4];
                        UINT32 pr, pg, pb;
//                      , br, bg, bb;
                        UINT32 fr, fg, fb, fa;
                        UINT16 pix0 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                        UINT16 pix1 = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu2)];
                        UINT16 pix2 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu)];
                        UINT16 pix3 = texture_ram[texture_page][(texture_y+iv2) * 2048 + (texture_x+iu2)];
                        int u_sub1 = (u2 >> (texture_coord_shift-16)) & 0xffff;
                        int v_sub1 = (v2 >> (texture_coord_shift-16)) & 0xffff;
                        int u_sub0 = 0xffff - u_sub1;
                        int v_sub0 = 0xffff - v_sub1;
                        /* Alpha is the low nibble; the "+ ((pix >> 1) & 1)"
                         * term nudges odd alphas up so 0xf maps to 16 --
                         * presumably to make full alpha fully opaque; confirm.
                         */
                        sr[0] = (pix0 & 0xf000); sg[0] = (pix0 & 0x0f00); sb[0] = (pix0 & 0x00f0); sa[0] = (pix0 & 0x000f) + ((pix0 >> 1) & 1);
                        sr[1] = (pix1 & 0xf000); sg[1] = (pix1 & 0x0f00); sb[1] = (pix1 & 0x00f0); sa[1] = (pix1 & 0x000f) + ((pix1 >> 1) & 1);
                        sr[2] = (pix2 & 0xf000); sg[2] = (pix2 & 0x0f00); sb[2] = (pix2 & 0x00f0); sa[2] = (pix2 & 0x000f) + ((pix2 >> 1) & 1);
                        sr[3] = (pix3 & 0xf000); sg[3] = (pix3 & 0x0f00); sb[3] = (pix3 & 0x00f0); sa[3] = (pix3 & 0x000f) + ((pix3 >> 1) & 1);
                        /* Calculate weighted U-samples */
                        ur[0] = ((sr[0] * u_sub0) + (sr[1] * u_sub1)) >> 16;
                        ug[0] = ((sg[0] * u_sub0) + (sg[1] * u_sub1)) >> 16;
                        ub[0] = ((sb[0] * u_sub0) + (sb[1] * u_sub1)) >> 16;
                        ua[0] = ((sa[0] * u_sub0) + (sa[1] * u_sub1)) >> 16;
                        ur[1] = ((sr[2] * u_sub0) + (sr[3] * u_sub1)) >> 16;
                        ug[1] = ((sg[2] * u_sub0) + (sg[3] * u_sub1)) >> 16;
                        ub[1] = ((sb[2] * u_sub0) + (sb[3] * u_sub1)) >> 16;
                        ua[1] = ((sa[2] * u_sub0) + (sa[3] * u_sub1)) >> 16;
                        /* Calculate the final sample */
                        fr = ((ur[0] * v_sub0) + (ur[1] * v_sub1)) >> 16;
                        fg = ((ug[0] * v_sub0) + (ug[1] * v_sub1)) >> 16;
                        fb = ((ub[0] * v_sub0) + (ub[1] * v_sub1)) >> 16;
                        fa = ((ua[0] * v_sub0) + (ua[1] * v_sub1)) >> 16;
                        // apply intensity
                        fr = (fr * polygon_intensity) >> 8;
                        fg = (fg * polygon_intensity) >> 8;
                        fb = (fb * polygon_intensity) >> 8;
                        /* Combine texel alpha with the global transparency. */
                        fa = (polygon_transparency * fa) >> 5;
                        /* Blend with existing framebuffer pixels */
                        pr = (p[x] & 0x7c00) << 1;
                        pg = (p[x] & 0x03e0) << 2;
                        pb = (p[x] & 0x001f) << 3;
                        fr = ((pr * (16 - fa)) + (fr * fa)) >> 4;
                        fg = ((pg * (16 - fa)) + (fg * fa)) >> 4;
                        fb = ((pb * (16 - fa)) + (fb * fa)) >> 4;
                        p[x] = ((fr & 0xf800) >> 1) | ((fg & 0x0f80) >> 2) | ((fb & 0x00f8) >> 3);
                    }
#else
                    pix = texture_ram[texture_page][(texture_y+iv) * 2048 + (texture_x+iu)];
                    r = (pix & 0xf000) >> 1;
                    g = (pix & 0x0f00) >> 2;
                    b = (pix & 0x00f0) >> 3;
                    p[x] = r | g | b;
#endif
                }
                z += dz;
                u += du;
                v += dv;
            }
        }
    }
}

/* Flat-shaded, opaque.  Only Z is interpolated (setup_triangle_1); the
 * given xRGB1555 color is intensity-scaled.  Writes color and depth.
 */
static void draw_triangle_color(VERTEX v1, VERTEX v2, VERTEX v3, UINT16 color)
{
    int x, y;
    struct poly_vertex vert[3];
    const struct poly_scanline_data *scans;

    v1.z = (1.0 / v1.z) * ZBUFFER_SCALE;
    v2.z = (1.0 / v2.z) * ZBUFFER_SCALE;
    v3.z = (1.0 / v3.z) * ZBUFFER_SCALE;

    vert[0].x = v1.x; vert[0].y = v1.y; vert[0].p[0] = (UINT32)v1.z;
    vert[1].x = v2.x; vert[1].y = v2.y; vert[1].p[0] = (UINT32)v2.z;
    vert[2].x = v3.x; vert[2].y = v3.y; vert[2].p[0] = (UINT32)v3.z;

    scans = setup_triangle_1(&vert[0], &vert[1], &vert[2], &clip3d);

    if(scans)
    {
        INT64 dz = scans->dp[0];
        for(y = scans->sy; y <= scans->ey; y++)
        {
            int x1, x2;
            INT64 z;
            const struct poly_scanline *scan = &scans->scanline[y - scans->sy];
            UINT16 *p = BITMAP_ADDR16(bitmap3d, y, 0);
            UINT32 *d = BITMAP_ADDR32(zbuffer, y, 0);
            x1 = scan->sx;
            x2 = scan->ex;
            z = scans->scanline[y - scans->sy].p[0];
            for(x = x1; x <= x2; x++)
            {
                UINT32 fr, fg, fb;
                UINT32 iz = z >> 16;
                iz |= viewport_priority;
                if(iz > d[x])
                {
                    fr = color & 0x7c00;
                    fg = color & 0x03e0;
                    fb = color & 0x001f;
                    // apply intensity
                    fr = (fr * polygon_intensity) >> 8;
                    fg = (fg * polygon_intensity) >> 8;
                    fb = (fb * polygon_intensity) >> 8;
                    p[x] = (fr & 0x7c00) | (fg & 0x03e0) | (fb & 0x1f);
                    d[x] = iz;      /* write new zbuffer value */
                }
                z += dz;
            }
        }
    }
}

/* Flat-shaded, translucent: intensity-scaled color blended with the
 * framebuffer by polygon_transparency (0..31).  Depth is tested only.
 */
static void draw_triangle_color_trans(VERTEX v1, VERTEX v2, VERTEX v3, UINT16 color)
{
    int x, y;
    struct poly_vertex vert[3];
    const struct poly_scanline_data *scans;

    v1.z = (1.0 / v1.z) * ZBUFFER_SCALE;
    v2.z = (1.0 / v2.z) * ZBUFFER_SCALE;
    v3.z = (1.0 / v3.z) * ZBUFFER_SCALE;

    vert[0].x = v1.x; vert[0].y = v1.y; vert[0].p[0] = (UINT32)v1.z;
    vert[1].x = v2.x; vert[1].y = v2.y; vert[1].p[0] = (UINT32)v2.z;
    vert[2].x = v3.x; vert[2].y = v3.y; vert[2].p[0] = (UINT32)v3.z;

    scans = setup_triangle_1(&vert[0], &vert[1], &vert[2], &clip3d);

    if(scans)
    {
        INT64 dz = scans->dp[0];
        for(y = scans->sy; y <= scans->ey; y++)
        {
            int x1, x2;
            INT64 z;
            UINT32 fr, fg, fb;
            UINT32 pr, pg, pb;
            const struct poly_scanline *scan = &scans->scanline[y - scans->sy];
            UINT16 *p = BITMAP_ADDR16(bitmap3d, y, 0);
            UINT32 *d = BITMAP_ADDR32(zbuffer, y, 0);
            x1 = scan->sx;
            x2 = scan->ex;
            z = scans->scanline[y - scans->sy].p[0];
            for(x = x1; x <= x2; x++)
            {
                UINT32 iz = z >> 16;
                iz |= viewport_priority;
                if(iz > d[x])
                {
                    fr = color & 0x7c00;
                    fg = color & 0x03e0;
                    fb = color & 0x001f;
                    // apply intensity
                    fr = (fr * polygon_intensity) >> 8;
                    fg = (fg * polygon_intensity) >> 8;
                    fb = (fb * polygon_intensity) >> 8;
                    /* Blend with existing framebuffer pixels */
                    pr = (p[x] & 0x7c00);
                    pg = (p[x] & 0x03e0);
                    pb = (p[x] & 0x001f);
                    fr = ((pr * (31 - polygon_transparency)) + (fr * polygon_transparency)) >> 5;
                    fg = ((pg * (31 - polygon_transparency)) + (fg * polygon_transparency)) >> 5;
                    fb = ((pb * (31 - polygon_transparency)) + (fb * polygon_transparency)) >> 5;
                    p[x] = (fr & 0x7c00) | (fg & 0x03e0) | (fb & 0x1f);
                }
                z += dz;
            }
        }
    }
}
155586.c
/* PR c/9799 */
/* Verify that GCC doesn't crash on excess elements in initializer
   for a flexible array member.  */

/* DejaGnu regression test: the initializers below are INTENTIONALLY
   invalid C (a flexible array member may not be initialized in an array
   of the enclosing struct).  The dg-error comments are test directives
   matched against the compiler diagnostics -- do not edit them.  */

/* Element type of the flexible array member. */
typedef struct
{
    int aaa;
} s1_t;

/* Struct ending in a C99 flexible array member. */
typedef struct
{
    int bbb;
    s1_t s1_array[];
} s2_t;

static s2_t s2_array[]= {
    { 1, 4 }, /* { dg-error "(initialization of flexible array member|near)" } */
    { 2, 5 }, /* { dg-error "(initialization of flexible array member|near)" } */
    { 3, 6 } /* { dg-error "(initialization of flexible array member|near)" } */
};
332092.c
/* Portions of this file are subject to the following copyright(s).  See
 * the Net-SNMP's COPYING file for more details and other copyrights
 * that may apply:
 */
/*
 * Portions of this file are copyrighted by:
 * Copyright © 2003 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms specified in the COPYING file
 * distributed with the Net-SNMP package.
 *
 * Portions of this file are copyrighted by:
 * Copyright (c) 2016 VMware, Inc. All rights reserved.
 * Use is subject to license terms specified in the COPYING file
 * distributed with the Net-SNMP package.
 */

/*
 * snmpusm.c
 *
 * Routines to manipulate a information about a "user" as
 * defined by the SNMP-USER-BASED-SM-MIB MIB.
 *
 * All functions usm_set_usmStateReference_*() return 0 on success, -1
 * otherwise.
 *
 * !! Tab stops set to 4 in some parts of this file. !!
 * (Designated on a per function.)
 */

#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-features.h>

#include <sys/types.h>
#include <stdio.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#if TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
#else
# if HAVE_SYS_TIME_H
# include <sys/time.h>
# else
# include <time.h>
# endif
#endif
#if HAVE_STRING_H
#include <string.h>
#else
#include <strings.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <net-snmp/types.h>
#include <net-snmp/output_api.h>
#include <net-snmp/config_api.h>
#include <net-snmp/utilities.h>

#include <net-snmp/library/asn1.h>
#include <net-snmp/library/snmp_api.h>
#include <net-snmp/library/callback.h>
#include <net-snmp/library/tools.h>
#include <net-snmp/library/keytools.h>
#include <net-snmp/library/snmpv3.h>
#include <net-snmp/library/lcd_time.h>
#include <net-snmp/library/scapi.h>
#include <net-snmp/library/callback.h>
#include <net-snmp/library/snmp_secmod.h>
#include <net-snmp/library/snmpusm.h>
#include <net-snmp/library/transform_oids.h>
#include <net-snmp/library/snmp_enum.h>

#ifdef HAVE_OPENSSL_DH_H
#include <openssl/dh.h>
#endif

netsnmp_feature_child_of(usm_all, libnetsnmp);
netsnmp_feature_child_of(usm_support, usm_all);

netsnmp_feature_require(usm_support);

/* Reference-counted snapshot of one USM user's security parameters,
 * carried between the message-processing and security subsystems.
 * All buffer fields are heap-owned by the structure (see MAKE_ENTRY).
 */
struct usmStateReference {
    int             refcnt;
    char           *usr_name;
    size_t          usr_name_length;
    u_char         *usr_engine_id;
    size_t          usr_engine_id_length;
    oid            *usr_auth_protocol;
    size_t          usr_auth_protocol_length;
    u_char         *usr_auth_key;
    size_t          usr_auth_key_length;
    oid            *usr_priv_protocol;
    size_t          usr_priv_protocol_length;
    u_char         *usr_priv_key;
    size_t          usr_priv_key_length;
    u_int           usr_sec_level;
};

/* Registered OIDs for the USM authentication protocols. */
const oid       usmNoAuthProtocol[10] = { NETSNMP_USMAUTH_BASE_OID,
                                          NETSNMP_USMAUTH_NOAUTH };
#ifndef NETSNMP_DISABLE_MD5
const oid       usmHMACMD5AuthProtocol[10] = { NETSNMP_USMAUTH_BASE_OID,
                                               NETSNMP_USMAUTH_HMACMD5 };
#endif
const oid       usmHMACSHA1AuthProtocol[10] = { NETSNMP_USMAUTH_BASE_OID,
                                                NETSNMP_USMAUTH_HMACSHA1 };

#ifdef HAVE_EVP_SHA384
const oid       usmHMAC384SHA512AuthProtocol[10] = { NETSNMP_USMAUTH_BASE_OID,
                                                     NETSNMP_USMAUTH_HMAC384SHA512 };
const oid       usmHMAC256SHA384AuthProtocol[10] = { NETSNMP_USMAUTH_BASE_OID,
                                                     NETSNMP_USMAUTH_HMAC256SHA384 };
#endif /* HAVE_EVP_SHA384 */

#ifdef HAVE_EVP_SHA224
const oid       usmHMAC192SHA256AuthProtocol[10] = { NETSNMP_USMAUTH_BASE_OID,
                                                     NETSNMP_USMAUTH_HMAC192SHA256 };
const oid       usmHMAC128SHA224AuthProtocol[10] = { NETSNMP_USMAUTH_BASE_OID,
                                                     NETSNMP_USMAUTH_HMAC128SHA224 };
#endif /* HAVE_EVP_SHA224 */

/* Registered OIDs for the USM privacy (encryption) protocols. */
const oid       usmNoPrivProtocol[10] = { 1, 3, 6, 1, 6, 3, 10, 1, 2, 1 };

#ifndef NETSNMP_DISABLE_DES
const oid       usmDESPrivProtocol[10] = { 1, 3, 6, 1, 6, 3, 10, 1, 2, 2 };
#endif

const oid       usmAESPrivProtocol[10] = { 1, 3, 6, 1, 6, 3, 10, 1, 2, 4 };
/* backwards compat */
const oid      *usmAES128PrivProtocol = usmAESPrivProtocol;

#ifdef NETSNMP_DRAFT_BLUMENTHAL_AES_04
/* OIDs from http://www.snmp.com/eso/esoConsortiumMIB.txt */
const oid       usmAES192PrivProtocol[9] = { 1,3,6,1,4,1,14832,1,3 };
const oid       usmAES256PrivProtocol[9] = { 1,3,6,1,4,1,14832,1,4 };
/* OIDs from CISCO MIB */
const oid       usmAES192CiscoPrivProtocol[11]  = { 1,3,6,1,4,1,9,12,6,1,1 };
const oid       usmAES256CiscoPrivProtocol[11]  = { 1,3,6,1,4,1,9,12,6,1,2 };
/*
 * these OIDs are in pySNMP source as OIDs for AES+Reeder. We'll just
 * use OIDS from CISCO-SNMP-USM-OIDS-MIB
 *
const oid       usmAES192Cisco2PrivProtocol[11]  = { 1,3,6,1,4,1,9,12,6,1,101 };
const oid       usmAES256Cisco2PrivProtocol[11]  = { 1,3,6,1,4,1,9,12,6,1,102 };
*/
#endif /* NETSNMP_DRAFT_BLUMENTHAL_AES_04 */

/* Maps a user-visible algorithm name to its internal constant. */
typedef struct usm_alg_type_s {
    const char *label;
    int         value;
} usm_alg_type_t;

/* Accepted spellings for each authentication algorithm. */
static const usm_alg_type_t usm_auth_type[] = {
    { "NOAUTH", NETSNMP_USMAUTH_NOAUTH },
    { "SHA", NETSNMP_USMAUTH_HMACSHA1 },
    { "SHA-1", NETSNMP_USMAUTH_HMACSHA1 },
    { "SHA1", NETSNMP_USMAUTH_HMACSHA1 },
#ifndef NETSNMP_DISABLE_MD5
    { "MD5", NETSNMP_USMAUTH_HMACMD5 },
#endif
#ifdef HAVE_EVP_SHA224
    { "SHA-224", NETSNMP_USMAUTH_HMAC128SHA224 },
    { "SHA224", NETSNMP_USMAUTH_HMAC128SHA224 },
    { "SHA-256", NETSNMP_USMAUTH_HMAC192SHA256 },
    { "SHA256", NETSNMP_USMAUTH_HMAC192SHA256 },
#endif
#ifdef HAVE_EVP_SHA384
    { "SHA-384", NETSNMP_USMAUTH_HMAC256SHA384 },
    { "SHA384", NETSNMP_USMAUTH_HMAC256SHA384 },
    { "SHA-512", NETSNMP_USMAUTH_HMAC384SHA512 },
    { "SHA512", NETSNMP_USMAUTH_HMAC384SHA512 },
#endif
    { NULL, -1 }
};

/* Accepted spellings for each privacy algorithm. */
static const usm_alg_type_t usm_priv_type[] = {
    { "NOPRIV", USM_CREATE_USER_PRIV_NONE },
#ifndef NETSNMP_DISABLE_DES
    { "DES", USM_CREATE_USER_PRIV_DES },
#endif
#ifdef HAVE_AES
    { "AES", USM_CREATE_USER_PRIV_AES },
    { "AES-128", USM_CREATE_USER_PRIV_AES },
    { "AES128", USM_CREATE_USER_PRIV_AES },
#ifdef NETSNMP_DRAFT_BLUMENTHAL_AES_04
    { "AES-192", USM_CREATE_USER_PRIV_AES192 },
    { "AES192", USM_CREATE_USER_PRIV_AES192 },
    { "AES-256", USM_CREATE_USER_PRIV_AES256 },
    { "AES256", USM_CREATE_USER_PRIV_AES256 },
    /** cisco / pysnmp variations */
    { "AES-192-C", USM_CREATE_USER_PRIV_AES192_CISCO },
    { "AES192C", USM_CREATE_USER_PRIV_AES192_CISCO },
    { "AES-256-C", USM_CREATE_USER_PRIV_AES256_CISCO },
    { "AES256C", USM_CREATE_USER_PRIV_AES256_CISCO },
#endif
#endif
    { NULL, -1 },
};

static u_int    dummy_etime, dummy_eboot;       /* For ISENGINEKNOWN(). */

/*
 * Set up default snmpv3 parameter value storage.
 */
#ifdef NETSNMP_SECMOD_USM
static const oid *defaultAuthType = NULL;
static size_t   defaultAuthTypeLen = 0;
static const oid *defaultPrivType = NULL;
static size_t   defaultPrivTypeLen = 0;
#endif /* NETSNMP_SECMOD_USM */

/*
 * Globals.
 */
/*
 * 1/2 of seed for the salt.   Cf. RFC2274, Sect 8.1.1.1.
 * (The comment applies to the salt_integer values here.)
 */
static u_int    salt_integer;
#ifdef HAVE_AES
static u_int    salt_integer64_1, salt_integer64_2;
#endif

/* Fallback "no name" user entry. */
static struct usmUser *noNameUser = NULL;

/*
 * Local storage (LCD) of the default user list.
 */
static struct usmUser *userList = NULL;

/*
 * Set a given field of the secStateRef.
 *
 * Allocate <len> bytes for type <type> pointed to by ref-><field>.
 * Then copy in <item> and record its length in ref-><field_len>.
 *
 * Return 0 on success, -1 otherwise.
 *
 * NOTE: expands to a return statement, so it may only be used inside a
 * function returning int.
 */
#define MAKE_ENTRY(ref, type, item, len, field, field_len)      \
do {                                                            \
    if (ref == NULL)                                            \
        return -1;                                              \
    if (ref->field != NULL) {                                   \
        SNMP_ZERO(ref->field, ref->field_len);                  \
        SNMP_FREE(ref->field);                                  \
    }                                                           \
    ref->field_len = 0;                                         \
    if (len == 0 || item == NULL)                               \
        return 0;                                               \
    ref->field = netsnmp_memdup(item, len * sizeof(type));      \
    if (ref->field == NULL)                                     \
        return -1;                                              \
                                                                \
    ref->field_len = len;                                       \
    return 0;                                                   \
} while (0)

static int
usm_clone_usmStateReference(struct usmStateReference *from,
                            struct usmStateReference **to);

/* Shutdown callback: drops the cached engine-boots/engine-time entry for
 * the local engineID so it is not reused across restarts.
 */
static int
free_enginetime_on_shutdown(int majorid, int minorid, void *serverarg,
                            void *clientarg)
{
    u_char          engineID[SNMP_MAX_ENG_SIZE];
    size_t          engineID_len = sizeof(engineID);

    DEBUGMSGTL(("snmpv3", "free enginetime callback called\n"));

    engineID_len = snmpv3_get_engineID(engineID, engineID_len);
    if (engineID_len > 0)
        free_enginetime(engineID, engineID_len);
    return 0;
}

/* Allocate a zeroed usmStateReference with an initial refcount of 1.
 * Returns NULL on allocation failure; caller owns the reference.
 */
static struct usmStateReference *
usm_malloc_usmStateReference(void)
{
    struct usmStateReference *retval;

    retval = calloc(1, sizeof(struct usmStateReference));
    if (retval)
        retval->refcnt = 1;

    return retval;
}

/*
end usm_malloc_usmStateReference() */

/*
 * Share or duplicate pdu's securityStateRef into new_pdu.
 *
 * For SNMPv2 traps a deep copy is made (the clone owns its own ref);
 * for everything else the existing reference is shared and its refcnt
 * bumped.  Returns 0 on success or the clone's error code.
 */
static int
usm_clone(netsnmp_pdu *pdu, netsnmp_pdu *new_pdu)
{
    struct usmStateReference *ref = pdu->securityStateRef;
    struct usmStateReference **new_ref =
        (struct usmStateReference **)&new_pdu->securityStateRef;
    int ret = 0;

    if (!ref)
        return ret;

    if (pdu->command == SNMP_MSG_TRAP2) {
        netsnmp_assert(pdu->securityModel == SNMP_DEFAULT_SECMODEL);
        ret = usm_clone_usmStateReference(ref, new_ref);
    } else {
        netsnmp_assert(ref == *new_ref);
        ref->refcnt++;
    }

    return ret;
}

/*
 * Drop one reference; on the last reference, zeroize the key material
 * before freeing (keys are secrets) and release all owned buffers.
 */
static void
usm_free_usmStateReference(void *old)
{
    struct usmStateReference *ref = old;

    if (!ref)
        return;

    if (--ref->refcnt > 0)
        return;

    SNMP_FREE(ref->usr_name);
    SNMP_FREE(ref->usr_engine_id);
    SNMP_FREE(ref->usr_auth_protocol);
    SNMP_FREE(ref->usr_priv_protocol);

    if (ref->usr_auth_key_length && ref->usr_auth_key) {
        SNMP_ZERO(ref->usr_auth_key, ref->usr_auth_key_length);
        SNMP_FREE(ref->usr_auth_key);
    }
    if (ref->usr_priv_key_length && ref->usr_priv_key) {
        SNMP_ZERO(ref->usr_priv_key, ref->usr_priv_key_length);
        SNMP_FREE(ref->usr_priv_key);
    }

    SNMP_FREE(ref);
}                               /* end usm_free_usmStateReference() */

/* Accessor for the module-private user list head. */
struct usmUser *
usm_get_userList(void)
{
    return userList;
}

/*
 * The usm_set_usmStateReference_* wrappers below each copy one field
 * into the state reference via MAKE_ENTRY, which returns 0/-1 on their
 * behalf (hence no explicit return statements here).
 */
static int
usm_set_usmStateReference_name(struct usmStateReference *ref,
                               char *name, size_t name_len)
{
    MAKE_ENTRY(ref, char, name, name_len, usr_name, usr_name_length);
}

static int
usm_set_usmStateReference_engine_id(struct usmStateReference *ref,
                                    u_char * engine_id,
                                    size_t engine_id_len)
{
    MAKE_ENTRY(ref, u_char, engine_id, engine_id_len, usr_engine_id,
               usr_engine_id_length);
}

static int
usm_set_usmStateReference_auth_protocol(struct usmStateReference *ref,
                                        oid * auth_protocol,
                                        size_t auth_protocol_len)
{
    MAKE_ENTRY(ref, oid, auth_protocol, auth_protocol_len,
               usr_auth_protocol, usr_auth_protocol_length);
}

static int
usm_set_usmStateReference_auth_key(struct usmStateReference *ref,
                                   u_char * auth_key, size_t auth_key_len)
{
    MAKE_ENTRY(ref, u_char, auth_key, auth_key_len, usr_auth_key,
               usr_auth_key_length);
}

static int
usm_set_usmStateReference_priv_protocol(struct usmStateReference *ref,
                                        oid * priv_protocol,
                                        size_t priv_protocol_len)
{
    MAKE_ENTRY(ref, oid, priv_protocol, priv_protocol_len,
               usr_priv_protocol, usr_priv_protocol_length);
}

static int
usm_set_usmStateReference_priv_key(struct usmStateReference *ref,
                                   u_char * priv_key, size_t priv_key_len)
{
    MAKE_ENTRY(ref, u_char, priv_key, priv_key_len, usr_priv_key,
               usr_priv_key_length);
}

static int
usm_set_usmStateReference_sec_level(struct usmStateReference *ref,
                                    int sec_level)
{
    if (ref == NULL)
        return -1;
    ref->usr_sec_level = sec_level;
    return 0;
}

/*
 * Deep-copy every field of *from into a freshly allocated reference.
 * On any failure the partial clone is freed, *to is set to NULL and -1
 * is returned.  (If allocation of the clone itself fails, the setters
 * see a NULL ref and fail, taking the same error path.)
 */
static int
usm_clone_usmStateReference(struct usmStateReference *from,
                            struct usmStateReference **to)
{
    struct usmStateReference *cloned_usmStateRef;

    if (from == NULL || to == NULL)
        return -1;

    *to = usm_malloc_usmStateReference();
    cloned_usmStateRef = *to;

    if (usm_set_usmStateReference_name(cloned_usmStateRef, from->usr_name,
                                       from->usr_name_length) ||
        usm_set_usmStateReference_engine_id(cloned_usmStateRef,
                                            from->usr_engine_id,
                                            from->usr_engine_id_length) ||
        usm_set_usmStateReference_auth_protocol(cloned_usmStateRef,
                                                from->usr_auth_protocol,
                                                from->usr_auth_protocol_length) ||
        usm_set_usmStateReference_auth_key(cloned_usmStateRef,
                                           from->usr_auth_key,
                                           from->usr_auth_key_length) ||
        usm_set_usmStateReference_priv_protocol(cloned_usmStateRef,
                                                from->usr_priv_protocol,
                                                from->usr_priv_protocol_length) ||
        usm_set_usmStateReference_priv_key(cloned_usmStateRef,
                                           from->usr_priv_key,
                                           from->usr_priv_key_length) ||
        usm_set_usmStateReference_sec_level(cloned_usmStateRef,
                                            from->usr_sec_level)) {
        usm_free_usmStateReference(*to);
        *to = NULL;
        return -1;
    }

    return 0;
}

#ifdef NETSNMP_ENABLE_TESTING_CODE
/*******************************************************************-o-******
 * emergency_print
 *
 * Parameters:
 *      *field
 *       length
 *
 * This is a print routine that is solely included so that it can be
 * used in gdb.
Don't use it as a function, it will be pulled before
 * a real release of the code.
 *
 * tab stop 4
 *
 * XXX fflush() only works on FreeBSD; core dumps on Sun OS's
 */
void
emergency_print(u_char * field, u_int length)
{
    /* Debug-only hex dump of 'field', 25 bytes per line. */
    int             iindex;
    int             start = 0;
    int             stop = 25;

    while (start < stop) {
        for (iindex = start; iindex < stop; iindex++)
            printf("%02X ", field[iindex]);

        printf("\n");
        start = stop;
        stop = stop + 25 < length ? stop + 25 : length;
    }
    fflush(0);
}                               /* end emergency_print() */
#endif                          /* NETSNMP_ENABLE_TESTING_CODE */

/*
 * Look up a user by (engineID, name) in 'puserList'.
 *
 * A user matches when the name matches exactly (length + bytes) and the
 * engineID matches (both NULL counts as equal).  If no entry matches and
 * use_default is set, the empty user name "" returns the module's
 * noNameUser, which is used to facilitate engineID discovery.
 * Returns NULL when nothing matches.
 */
static struct usmUser *
usm_get_user_from_list(const u_char *engineID, size_t engineIDLen,
                       const char *name, size_t nameLen,
                       struct usmUser *puserList, int use_default)
{
    struct usmUser *ptr;

    for (ptr = puserList; ptr != NULL; ptr = ptr->next) {
        if (ptr->name && strlen(ptr->name) == nameLen &&
            memcmp(ptr->name, name, nameLen) == 0) {
            DEBUGMSGTL(("usm", "match on user %s\n", ptr->name));
            if (ptr->engineIDLen == engineIDLen &&
                ((ptr->engineID == NULL && engineID == NULL) ||
                 (ptr->engineID != NULL && engineID != NULL &&
                  memcmp(ptr->engineID, engineID, engineIDLen) == 0)))
                return ptr;
            DEBUGMSGTL(("usm", "no match on engineID ("));
            if (engineID) {
                DEBUGMSGHEX(("usm", engineID, engineIDLen));
            } else {
                DEBUGMSGTL(("usm", "Empty EngineID"));
            }
            DEBUGMSG(("usm", ")\n"));
        }
    }

    /*
     * return "" user used to facilitate engineID discovery
     */
    if (use_default && !strcmp(name, ""))
        return noNameUser;

    return NULL;
}

/*
 * Look up (engineID, name) in the global userList, allowing the
 * "" discovery user as a fallback.  'name' need not be NUL-terminated;
 * nameLen gives its length.
 */
struct usmUser *
usm_get_user2(const u_char *engineID, size_t engineIDLen, const void *name,
              size_t nameLen)
{
    DEBUGMSGTL(("usm", "getting user %.*s\n", (int)nameLen,
                (const char *)name));
    return usm_get_user_from_list(engineID, engineIDLen, name, nameLen,
                                  userList, 1);
}

/*
 * usm_get_user(): Returns a user from userList based on the engineID,
 * engineIDLen and name of the requested user.
 */
struct usmUser *
usm_get_user(const u_char *engineID, size_t engineIDLen, const char *name)
{
    /* Convenience wrapper for NUL-terminated user names. */
    return usm_get_user2(engineID, engineIDLen, name, strlen(name));
}

/*
 * Insert 'user' into the doubly-linked list 'puserList', keeping it
 * sorted by engineIDLen, then engineID, then name length, then name
 * (the usmUserTable index order).  An exact (engineID, name) duplicate
 * replaces the existing entry: the old one is unlinked and freed.
 * Returns the (possibly new) head of the list.
 */
static struct usmUser *
usm_add_user_to_list(struct usmUser *user, struct usmUser *puserList)
{
    struct usmUser *nptr, *pptr, *optr;

    /*
     * loop through puserList till we find the proper, sorted place to
     * insert the new user
     */
    /* XXX - how to handle a NULL user->name ?? */
    /* XXX - similarly for a NULL nptr->name ?? */
    for (nptr = puserList, pptr = NULL; nptr != NULL;
         pptr = nptr, nptr = nptr->next) {
        if (nptr->engineIDLen > user->engineIDLen)
            break;

        if (user->engineID == NULL && nptr->engineID != NULL)
            break;

        if (nptr->engineIDLen == user->engineIDLen &&
            (nptr->engineID != NULL && user->engineID != NULL &&
             memcmp(nptr->engineID, user->engineID,
                    user->engineIDLen) > 0))
            break;

        if (!(nptr->engineID == NULL && user->engineID != NULL)) {
            /* engineIDs are equal (or both NULL): order by name. */
            if (nptr->engineIDLen == user->engineIDLen &&
                ((nptr->engineID == NULL && user->engineID == NULL) ||
                 memcmp(nptr->engineID, user->engineID,
                        user->engineIDLen) == 0)
                && strlen(nptr->name) > strlen(user->name))
                break;

            if (nptr->engineIDLen == user->engineIDLen &&
                ((nptr->engineID == NULL && user->engineID == NULL) ||
                 memcmp(nptr->engineID, user->engineID,
                        user->engineIDLen) == 0)
                && strlen(nptr->name) == strlen(user->name)
                && strcmp(nptr->name, user->name) > 0)
                break;

            if (nptr->engineIDLen == user->engineIDLen &&
                ((nptr->engineID == NULL && user->engineID == NULL) ||
                 memcmp(nptr->engineID, user->engineID,
                        user->engineIDLen) == 0)
                && strlen(nptr->name) == strlen(user->name)
                && strcmp(nptr->name, user->name) == 0) {
                /*
                 * the user is an exact match of a previous entry.
                 * Credentials may be different, though, so remove
                 * the old entry (and add the new one)!
                 */
                if (pptr) { /* change prev's next pointer */
                    pptr->next = nptr->next;
                }
                if (nptr->next) { /* change next's prev pointer */
                    nptr->next->prev = pptr;
                }
                optr = nptr;
                nptr = optr->next; /* add new user at this position */
                /* free the old user */
                optr->next=NULL;
                optr->prev=NULL;
                usm_free_user(optr);
                break;          /* new user will be added below */
            }
        }
    }

    /*
     * nptr should now point to the user that we need to add ourselves
     * in front of, and pptr should be our new 'prev'.
     */

    /*
     * change our pointers
     */
    user->prev = pptr;
    user->next = nptr;

    /*
     * change the next's prev pointer
     */
    if (user->next)
        user->next->prev = user;

    /*
     * change the prev's next pointer
     */
    if (user->prev)
        user->prev->next = user;

    /*
     * rewind to the head of the list and return it (since the new head
     * could be us, we need to notify the above routine who the head now is.
     */
    for (pptr = user; pptr->prev != NULL; pptr = pptr->prev);
    return pptr;
}

/*
 * usm_add_user(): Add's a user to the userList, sorted by the
 * engineIDLength then the engineID then the name length then the name
 * to facilitate getNext calls on a usmUser table which is indexed by
 * these values.
 *
 * returns the head of the list (which could change due to this add).
 */
struct usmUser *
usm_add_user(struct usmUser *user)
{
    struct usmUser *uptr;
    uptr = usm_add_user_to_list(user, userList);
    if (uptr != NULL)
        userList = uptr;
    return uptr;
}

/*
 * usm_remove_usmUser_from_list remove user from (optional) list
 *
 * if list is not specified, defaults to global userList.
 *
 * returns SNMPERR_SUCCESS or SNMPERR_USM_UNKNOWNSECURITYNAME
 */
static int
usm_remove_usmUser_from_list(struct usmUser *user,
                             struct usmUser **ppuserList)
{
    struct usmUser *nptr, *pptr;

    /*
     * NULL pointers aren't allowed
     */
    if (ppuserList == NULL)
        ppuserList = &userList;

    if (*ppuserList == NULL)
        return SNMPERR_USM_UNKNOWNSECURITYNAME;

    /*
     * find the user in the list
     */
    for (nptr = *ppuserList, pptr = NULL; nptr != NULL;
         pptr = nptr, nptr = nptr->next) {
        if (nptr == user)
            break;
    }

    if (nptr) {
        /*
         * remove the user from the linked list
         */
        if (pptr) {
            pptr->next = nptr->next;
        }
        if (nptr->next) {
            nptr->next->prev = pptr;
        }
    } else {
        /*
         * user didn't exist
         */
        return SNMPERR_USM_UNKNOWNSECURITYNAME;
    }
    if (nptr == *ppuserList)    /* we're the head of the list, need to change
                                 * the head to the next user */
        *ppuserList = nptr->next;
    return SNMPERR_SUCCESS;
}                               /* end usm_remove_usmUser_from_list() */

/*
 * usm_remove_user_from_list
 *
 * removes user from list.
 *
 * returns new list head on success, or NULL on error.
 *
 * NOTE: if there was only one user in the list, list head will be NULL.
 * So NULL can also mean success. Use the newer usm_remove_usmUser() for
 * more specific return codes. This function is kept for backwards
 * compatibility with this ambiguous behaviour.
 */
static struct usmUser *
usm_remove_user_from_list(struct usmUser *user,
                          struct usmUser **ppuserList)
{
    int rc = usm_remove_usmUser_from_list(user, ppuserList);
    if (rc != SNMPERR_SUCCESS || NULL == ppuserList)
        return NULL;

    return *ppuserList;
}                               /* end usm_remove_user_from_list() */

/*
 * usm_remove_user(): finds and removes a user from a list
 */
struct usmUser *
usm_remove_user(struct usmUser *user)
{
    return usm_remove_user_from_list(user, &userList);
}

/*
 * usm_free_user():  calls free() on all needed parts of struct usmUser and
 * the user himself.
 *
 * Note: This should *not* be called on an object in a list (IE,
 * remove it from the list first, and set next and prev to NULL), but
 * will try to reconnect the list pieces again if it is called this
 * way.  If called on the head of the list, the entire list will be
 * lost.
 */
struct usmUser *
usm_free_user(struct usmUser *user)
{
    if (user == NULL)
        return NULL;

    SNMP_FREE(user->engineID);
    SNMP_FREE(user->name);
    SNMP_FREE(user->secName);
    SNMP_FREE(user->cloneFrom);
    SNMP_FREE(user->userPublicString);
    SNMP_FREE(user->authProtocol);
    SNMP_FREE(user->privProtocol);

    /* Key material is zeroized before release — keys are secrets. */
    if (user->authKey != NULL) {
        SNMP_ZERO(user->authKey, user->authKeyLen);
        SNMP_FREE(user->authKey);
    }

    if (user->privKey != NULL) {
        SNMP_ZERO(user->privKey, user->privKeyLen);
        SNMP_FREE(user->privKey);
    }

    if (user->authKeyKu != NULL) {
        SNMP_ZERO(user->authKeyKu, user->authKeyKuLen);
        SNMP_FREE(user->authKeyKu);
    }

    if (user->privKeyKu != NULL) {
        SNMP_ZERO(user->privKeyKu, user->privKeyKuLen);
        SNMP_FREE(user->privKeyKu);
    }

#ifdef NETSNMP_USE_OPENSSL
    if (user->usmDHUserAuthKeyChange) {
        DH_free(user->usmDHUserAuthKeyChange);
        user->usmDHUserAuthKeyChange = NULL;
    }
    if (user->usmDHUserPrivKeyChange) {
        DH_free(user->usmDHUserPrivKeyChange);
        user->usmDHUserPrivKeyChange = NULL;
    }
#endif

    /*
     * FIX  Why not put this check *first?*
     */
    if (user->prev != NULL && user->prev != (struct usmUser *)-1) {
        /*
         * ack, this shouldn't happen
         */
        user->prev->next = user->next;
    }
    if (user->next != NULL && user->next != (struct usmUser *)-1) {
        user->next->prev = user->prev;
        if (user->prev != NULL)
            /*
             * ack this is really bad, because it means
             * we'll loose the head of some structure tree
             */
            DEBUGMSGTL(("usm",
                        "Severe: Asked to free the head of a usmUser tree somewhere."));
    }

    SNMP_ZERO(user, sizeof(*user));
    SNMP_FREE(user);

    return NULL;                /* for convenience to returns from calling functions */
}                               /* end usm_free_user() */

/*
 * Apply a KeyChange value (RFC 3414, usmUserPrivKeyChange) to 'user'.
 *
 * If the incoming keychange blob is shorter than twice the privacy key
 * length, the key-usage-length is first extended via
 * usm_extend_user_kul() on a stack copy.  On success the user's old
 * privKey pointer/length are handed back through old_key/old_key_len
 * (caller takes ownership) and a freshly allocated new key is installed.
 * 'fname' is only used for debug output.
 *
 * Returns SNMP_ERR_NOERROR, SNMP_ERR_GENERR, or
 * SNMP_ERR_RESOURCEUNAVAILABLE on allocation failure.
 */
int
usm_set_priv_key(struct usmUser *user, const char *fname,
                 u_char **old_key, size_t *old_key_len,
                 const u_char *new_key, u_int new_key_len)
{
    u_char          buf[SNMP_MAXBUF_SMALL], buf2[SNMP_MAXBUF_SMALL];
    size_t          buflen = sizeof(buf);
    int             plen, res;

    plen = sc_get_proper_priv_length(user->privProtocol,
                                     user->privProtocolLen);
    DEBUGMSGTL(("usmUser", "plen %d\n", plen));

    /*
     * extend key as needed
     */
    DEBUGMSGTL(("9:usmUser", "%s: new_key_len %d\n", fname, new_key_len));
    if (new_key_len < 2 * plen) {
        /* Build a throwaway user that shares the credentials but points
         * its privKey at a stack buffer we are allowed to grow. */
        struct usmUser dummy;
        memset(&dummy, 0x0, sizeof(dummy));
        dummy.engineID = user->engineID;
        dummy.engineIDLen = user->engineIDLen;
        dummy.authProtocol = user->authProtocol;
        dummy.authProtocolLen = user->authProtocolLen;
        dummy.privProtocol = user->privProtocol;
        dummy.privProtocolLen = user->privProtocolLen;
        memcpy(buf2, new_key, new_key_len);
        dummy.privKey = buf2;
        dummy.privKeyLen = new_key_len;
        res = usm_extend_user_kul(&dummy, sizeof(buf2));
        if (res != SNMP_ERR_NOERROR) {
            DEBUGMSGTL(("usmUser", "%s: extend kul failed\n", fname));
            return SNMP_ERR_GENERR;
        }
        DEBUGMSGTL(("9:usmUser", "%s: extend kul OK\n", fname));
        new_key = dummy.privKey;
        new_key_len = dummy.privKeyLen;
        /*
         * make sure no reallocation happened; buf2 must be large enough
         */
        netsnmp_assert(dummy.privKey == buf2);
    }

    /*
     * Change the key.
     */
    DEBUGMSGTL(("usmUser", "%s: changing priv key for user %s\n",
                fname, user->secName));

    res = decode_keychange(user->authProtocol, user->authProtocolLen,
                           user->privKey, user->privKeyLen,
                           new_key, new_key_len, buf, &buflen);

    if (res != SNMPERR_SUCCESS) {
        DEBUGMSGTL(("usmUser", "%s failed\n", fname));
        return SNMP_ERR_GENERR;
    }
    DEBUGMSGTL(("usmUser", "%s succeeded\n", fname));
    *old_key = user->privKey;
    *old_key_len = user->privKeyLen;
    user->privKey = netsnmp_memdup(buf, buflen);
    if (user->privKey == NULL)
        return SNMP_ERR_RESOURCEUNAVAILABLE;
    user->privKeyLen = buflen;

    return SNMP_ERR_NOERROR;
}

/*******************************************************************-o-******
 * usm_generate_OID
 *
 * Parameters:
 *      *prefix         (I) OID prefix to the usmUser table entry.
 *       prefixLen      (I)
 *      *uptr           (I) Pointer to a user in the user list.
 *      *length         (O) Length of generated index OID.
 *
 * Returns:
 *      Pointer to the OID index for the user (uptr)  -OR-
 *      NULL on failure.
 *
 *
 * Generate the index OID for a given usmUser name.  'length' is set to
 * the length of the index OID.
 *
 * Index OID format is:
 *
 *    <...prefix>.<engineID_length>.<engineID>.<user_name_length>.<user_name>
 */
oid            *
usm_generate_OID(const oid *prefix, size_t prefixLen,
                 const struct usmUser *uptr, size_t *length)
{
    oid            *indexOid;
    int             i;          /* NOTE(review): int vs size_t loop bounds
                                 * below — fine for realistic lengths, but
                                 * the comparison is signed/unsigned. */

    /* prefix + 1 length sub-id + engineID + 1 length sub-id + name */
    *length = 2 + uptr->engineIDLen + strlen(uptr->name) + prefixLen;
    indexOid = malloc(*length * sizeof(oid));
    if (!indexOid)
        return indexOid;

    memmove(indexOid, prefix, prefixLen * sizeof(oid));

    indexOid[prefixLen] = uptr->engineIDLen;
    for (i = 0; i < uptr->engineIDLen; i++)
        indexOid[prefixLen + 1 + i] = (oid) uptr->engineID[i];

    indexOid[prefixLen + uptr->engineIDLen + 1] = strlen(uptr->name);
    for (i = 0; i < strlen(uptr->name); i++)
        indexOid[prefixLen + uptr->engineIDLen + 2 + i] =
            (oid) uptr->name[i];

    return indexOid;
}                               /* end usm_generate_OID() */

/*******************************************************************-o-******
 * asn_predict_int_length
 *
 * Parameters:
 *      type    (UNUSED)
 *      number
 *      len
 *
 * Returns:
 *      Number of bytes necessary to store the ASN.1 encoded value of 'number'.
 *
 *
 * This gives the number of bytes that the ASN.1 encoder (in asn1.c) will
 * use to encode a particular integer value.
 *
 * Returns the length of the integer -- NOT THE HEADER!
 *
 * Do this the same way as asn_build_int()...
 */
static int
asn_predict_int_length(int type, long number, size_t len)
{
    register u_long mask;

    if (len != sizeof(long))
        return -1;

    /* mask covers the top 9 bits; leading bytes that are all-0 or all-1
     * (sign extension) are redundant and dropped. */
    mask = ((u_long) 0x1FF) << ((8 * (sizeof(long) - 1)) - 1);
    /*
     * mask is 0xFF800000 on a big-endian machine
     */

    while ((((number & mask) == 0) || ((number & mask) == mask))
           && len > 1) {
        len--;
        number <<= 8;
    }

    return len;
}                               /* end asn_predict_int_length() */

/*******************************************************************-o-******
 * asn_predict_length
 *
 * Parameters:
 *      type
 *      *ptr
 *      u_char_len
 *
 * Returns:
 *      Length in bytes:        1 + <n> + <u_char_len>, where
 *
 *              1               For the ASN.1 type.
 *              <n>             # of bytes to store length of data.
 *              <u_char_len>    Length of data associated with ASN.1 type.
 *
 * This gives the number of bytes that the ASN.1 encoder (in asn1.c) will
 * use to encode a particular integer value.  This is as broken as the
 * currently used encoder.
 *
 * XXX  How is <n> chosen, exactly??
 */
static int
asn_predict_length(int type, u_char * ptr, size_t u_char_len)
{

    if (type & ASN_SEQUENCE)
        return 1 + 3 + u_char_len;

    if (type & ASN_INTEGER) {
        u_long          value;
        memcpy(&value, ptr, u_char_len);
        /* NOTE(review): asn_predict_int_length() can return -1, which
         * wraps to a huge size_t here and falls through to the 3-byte
         * length form — verify callers never pass len != sizeof(long). */
        u_char_len = asn_predict_int_length(type, value, u_char_len);
    }

    if (u_char_len < 0x80)
        return 1 + 1 + u_char_len;
    else if (u_char_len < 0xFF)
        return 1 + 2 + u_char_len;
    else
        return 1 + 3 + u_char_len;

}                               /* end asn_predict_length() */

/*******************************************************************-o-******
 * usm_calc_offsets
 *
 * Parameters:
 *      (See list below...)
 *
 * Returns:
 *      0       On success,
 *      -1      Otherwise.
 *
 *
 * This routine calculates the offsets into an outgoing message buffer
 * for the necessary values.
The outgoing buffer will generically
 * look like this:
 *
 * SNMPv3 Message
 * SEQ len[11]
 *      INT len version
 *   Header
 *      SEQ len
 *              INT len MsgID
 *              INT len msgMaxSize
 *              OST len msgFlags (OST = OCTET STRING)
 *              INT len msgSecurityModel
 *   MsgSecurityParameters
 *      [1] OST len[2]
 *              SEQ len[3]
 *                      OST len msgAuthoritativeEngineID
 *                      INT len msgAuthoritativeEngineBoots
 *                      INT len msgAuthoritativeEngineTime
 *                      OST len msgUserName
 *                      OST len[4] [5] msgAuthenticationParameters
 *                      OST len[6] [7] msgPrivacyParameters
 *   MsgData
 *      [8] OST len[9] [10] encryptedPDU
 *      or
 *      [8,10] SEQUENCE len[9] scopedPDU
 * [12]
 *
 * The bracketed points will be needed to be identified ([x] is an index
 * value, len[x] means a length value).  Here is a semantic guide to them:
 *
 * [1] = globalDataLen (input)
 * [2] = otstlen
 * [3] = seq_len
 * [4] = msgAuthParmLen (may be 0 or 12)
 * [5] = authParamsOffset
 * [6] = msgPrivParmLen (may be 0 or 8)
 * [7] = privParamsOffset
 * [8] = globalDataLen + msgSecParmLen
 * [9] = datalen
 * [10] = dataOffset
 * [11] = theTotalLength - the length of the header itself
 * [12] = theTotalLength
 */
static int
usm_calc_offsets(size_t globalDataLen,  /* SNMPv3Message + HeaderData */
                 int secLevel, size_t secEngineIDLen, size_t secNameLen,
                 size_t scopedPduLen,   /* An BER encoded sequence. */
                 u_long engineboots,    /* XXX (asn1.c works in long, not int.) */
                 long engine_time,      /* XXX (asn1.c works in long, not int.) */
                 size_t * theTotalLength,       /* globalDataLen + msgSecurityP. + msgData */
                 size_t * authParamsOffset,     /* Distance to auth bytes.                 */
                 size_t * privParamsOffset,     /* Distance to priv bytes.                 */
                 size_t * dataOffset,   /* Distance to scopedPdu SEQ  -or-  the
                                         *   crypted (data) portion of msgData.    */
                 size_t * datalen,      /* Size of msgData OCTET STRING encoding.  */
                 size_t * msgAuthParmLen,       /* Size of msgAuthenticationParameters.    */
                 size_t * msgPrivParmLen,       /* Size of msgPrivacyParameters.           */
                 size_t * otstlen,      /* Size of msgSecurityP. O.S. encoding.    */
                 size_t * seq_len,      /* Size of msgSecurityP. SEQ data.         */
                 size_t * msgSecParmLen)
{                               /* Size of msgSecurityP. SEQ.              */
    int             engIDlen,   /* Sizes of OCTET STRING and SEQ encodings */
                    engBtlen,   /*   for fields within                     */
                    engTmlen,   /*   msgSecurityParameters portion of      */
                    namelen,    /*   SNMPv3Message.                        */
                    authlen, privlen, ret;

    /*
     * If doing authentication, msgAuthParmLen = 12 else msgAuthParmLen = 0.
     * If doing encryption,     msgPrivParmLen = 8  else msgPrivParmLen = 0.
     */
    *msgAuthParmLen = (secLevel == SNMP_SEC_LEVEL_AUTHNOPRIV
                       || secLevel == SNMP_SEC_LEVEL_AUTHPRIV) ? 12 : 0;

    *msgPrivParmLen = (secLevel == SNMP_SEC_LEVEL_AUTHPRIV) ? 8 : 0;

    /*
     * Calculate lengths.
     */
    if ((engIDlen = asn_predict_length(ASN_OCTET_STR,
                                       NULL, secEngineIDLen)) == -1) {
        return -1;
    }

    if ((engBtlen = asn_predict_length(ASN_INTEGER,
                                       (u_char *) & engineboots,
                                       sizeof(long))) == -1) {
        return -1;
    }

    if ((engTmlen = asn_predict_length(ASN_INTEGER,
                                       (u_char *) & engine_time,
                                       sizeof(long))) == -1) {
        return -1;
    }

    if ((namelen = asn_predict_length(ASN_OCTET_STR,
                                      NULL, secNameLen)) == -1) {
        return -1;
    }

    if ((authlen = asn_predict_length(ASN_OCTET_STR,
                                      NULL, *msgAuthParmLen)) == -1) {
        return -1;
    }

    if ((privlen = asn_predict_length(ASN_OCTET_STR,
                                      NULL, *msgPrivParmLen)) == -1) {
        return -1;
    }

    *seq_len =
        engIDlen + engBtlen + engTmlen + namelen + authlen + privlen;

    if ((ret = asn_predict_length(ASN_SEQUENCE,
                                  NULL, *seq_len)) == -1) {
        return -1;
    }
    *otstlen = (size_t)ret;

    if ((ret = asn_predict_length(ASN_OCTET_STR,
                                  NULL, *otstlen)) == -1) {
        return -1;
    }
    *msgSecParmLen = (size_t)ret;

    *authParamsOffset = globalDataLen + +(*msgSecParmLen - *seq_len)
        + engIDlen + engBtlen + engTmlen + namelen
        + (authlen - *msgAuthParmLen);

    *privParamsOffset = *authParamsOffset + *msgAuthParmLen
        + (privlen - *msgPrivParmLen);

    /*
     * Compute the size of the plaintext.  Round up to account for cipher
     * block size, if necessary.
     *
     * XXX  This is hardwired for 1DES... If scopedPduLen is already
     *      a multiple of 8, then *add* 8 more; otherwise, round up
     *      to the next multiple of 8.
     *
     * FIX  Calculation of encrypted portion of msgData and consequent
     *      setting and sanity checking of theTotalLength, et al. should
     *      occur *after* encryption has taken place.
     */
    if (secLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
        scopedPduLen = ROUNDUP8(scopedPduLen);

        if ((ret = asn_predict_length(ASN_OCTET_STR, NULL, scopedPduLen)) == -1) {
            return -1;
        }
        *datalen = (size_t)ret;
    } else {
        *datalen = scopedPduLen;
    }

    *dataOffset = globalDataLen + *msgSecParmLen +
        (*datalen - scopedPduLen);
    *theTotalLength = globalDataLen + *msgSecParmLen + *datalen;

    return 0;

}                               /* end usm_calc_offsets() */

#ifndef NETSNMP_DISABLE_DES
/*******************************************************************-o-******
 * usm_set_salt
 *
 * Parameters:
 *      *iv               (O)   Buffer to contain IV.
 *      *iv_length        (O)   Length of iv.
 *      *priv_salt        (I)   Salt portion of private key.
 *       priv_salt_length (I)   Length of priv_salt.
 *      *msgSalt          (I/O) Pointer salt portion of outgoing msg buffer.
 *
 * Returns:
 *      0       On success,
 *      -1      Otherwise.
 *
 *      Determine the initialization vector for the DES-CBC encryption.
 *      (Cf. RFC 2274, 8.1.1.1.)
 *
 *      iv is defined as the concatenation of engineBoots and the
 *              salt integer.
 *      The salt integer is incremented.
 *      The resulting salt is copied into the msgSalt buffer.
 *      The result of the concatenation is then XORed with the salt
 *              portion of the private key (last 8 bytes).
 *      The IV result is returned individually for further use.
 */
static int
usm_set_salt(u_char * iv,
             size_t * iv_length,
             u_char * priv_salt, size_t priv_salt_length,
             u_char * msgSalt)
{
    size_t          propersize_salt = BYTESIZE(USM_DES_SALT_LENGTH);
    int             net_boots;
    int             net_salt_int;
    /*
     * net_* should be encoded in network byte order.  XXX  Why?
     */
    int             iindex;

    /*
     * Sanity check.
     */
    if (!iv || !iv_length || !priv_salt ||
        (*iv_length != propersize_salt)
        || (priv_salt_length < propersize_salt)) {
        return -1;
    }

    net_boots = htonl(snmpv3_local_snmpEngineBoots());
    net_salt_int = htonl(salt_integer);

    salt_integer += 1;          /* monotonically increasing per-message salt */

    memcpy(iv, &net_boots, propersize_salt / 2);
    memcpy(iv + (propersize_salt / 2), &net_salt_int,
           propersize_salt / 2);

    if (msgSalt)
        memcpy(msgSalt, iv, propersize_salt);

    /*
     * Turn the salt into an IV: XOR <boots, salt_int> with salt
     * portion of priv_key.
     */
    for (iindex = 0; iindex < (int) propersize_salt; iindex++)
        iv[iindex] ^= priv_salt[iindex];

    return 0;

}                               /* end usm_set_salt() */
#endif

#ifdef HAVE_AES
/*******************************************************************-o-******
 * usm_set_aes_iv
 *
 * Parameters:
 *      *iv               (O)   Buffer to contain IV.
 *      *iv_length        (O)   Length of iv.
 *      net_boots         (I)   the network byte order of the authEng boots val
 *      net_time          (I)   the network byte order of the authEng time val
 *      *salt             (O)   A buffer for the outgoing salt (= 8 bytes of iv)
 *
 * Returns:
 *      0       On success,
 *      -1      Otherwise.
 *
 *      Determine the initialization vector for AES encryption.
 *      (draft-blumenthal-aes-usm-03.txt, 3.1.2.2)
 *
 *      iv is defined as the concatenation of engineBoots, engineTime
 *      and a 64 bit salt-integer.
 *      The 64 bit salt integer is incremented.
 *      The resulting salt is copied into the salt buffer.
 *      The IV result is returned individually for further use.
 */
static int
usm_set_aes_iv(u_char * iv,
               size_t * iv_length,
               u_int net_boots,
               u_int net_time,
               u_char * salt)
{
    /*
     * net_* should be encoded in network byte order.
     */
    int             net_salt_int1, net_salt_int2;
#define PROPER_AES_IV_SIZE 64

    /*
     * Sanity check.
     */
    if (!iv || !iv_length) {
        return -1;
    }

    net_salt_int1 = htonl(salt_integer64_1);
    net_salt_int2 = htonl(salt_integer64_2);

    /* Increment low word, skipping 0 on wrap. */
    if ((salt_integer64_2 += 1) == 0)
        salt_integer64_2 += 1;

    /*
     * XXX: warning: hard coded proper lengths
     */
    memcpy(iv, &net_boots, 4);
    memcpy(iv+4, &net_time, 4);
    memcpy(iv+8, &net_salt_int1, 4);
    memcpy(iv+12, &net_salt_int2, 4);

    memcpy(salt, iv+8, 8); /* only copy the needed portion */
    return 0;
}                               /* end usm_set_aes_iv() */
#endif /* HAVE_AES */

/*******************************************************************-o-******
 * usm_check_secLevel_vs_protocols
 *
 * Parameters:
 *       level
 *      *authProtocol
 *       authProtocolLen
 *      *privProtocol
 *       privProtocolLen
 *
 * Returns:
 *      0       On success,
 *      1       Otherwise.
 *
 * Same as above but with explicitly named transform types instead of taking
 * from the usmUser structure.
 */
static int
usm_check_secLevel_vs_protocols(int level,
                                const oid * authProtocol,
                                u_int authProtocolLen,
                                const oid * privProtocol,
                                u_int privProtocolLen)
{

    /* Privacy requested but no privacy protocol configured? */
    if (level == SNMP_SEC_LEVEL_AUTHPRIV
        && (netsnmp_oid_equals
            (privProtocol, privProtocolLen, usmNoPrivProtocol,
             OID_LENGTH(usmNoPrivProtocol)) == 0)) {
        DEBUGMSGTL(("usm", "Level: %d\n", level));
        DEBUGMSGTL(("usm", "Auth Protocol: "));
        DEBUGMSGOID(("usm", authProtocol, authProtocolLen));
        DEBUGMSG(("usm", ", Priv Protocol: "));
        DEBUGMSGOID(("usm", privProtocol, privProtocolLen));
        DEBUGMSG(("usm", "\n"));
        return 1;
    }
    /* Authentication requested but no auth protocol configured? */
    if ((level == SNMP_SEC_LEVEL_AUTHPRIV
         || level == SNMP_SEC_LEVEL_AUTHNOPRIV)
        && (netsnmp_oid_equals
            (authProtocol, authProtocolLen, usmNoAuthProtocol,
             OID_LENGTH(usmNoAuthProtocol)) == 0)) {
        DEBUGMSGTL(("usm", "Level: %d\n", level));
        DEBUGMSGTL(("usm", "Auth Protocol: "));
        DEBUGMSGOID(("usm", authProtocol, authProtocolLen));
        DEBUGMSG(("usm", ", Priv Protocol: "));
        DEBUGMSGOID(("usm", privProtocol, privProtocolLen));
        DEBUGMSG(("usm", "\n"));
        return 1;
    }

    return 0;

}                               /* end usm_check_secLevel_vs_protocols() */

/*******************************************************************-o-******
 *
usm_generate_out_msg * * Parameters: * (See list below...) * * Returns: * SNMPERR_SUCCESS On success. * SNMPERR_USM_AUTHENTICATIONFAILURE * SNMPERR_USM_ENCRYPTIONERROR * SNMPERR_USM_GENERICERROR * SNMPERR_USM_UNKNOWNSECURITYNAME * SNMPERR_USM_GENERICERROR * SNMPERR_USM_UNSUPPORTEDSECURITYLEVEL * * * Generates an outgoing message. * * XXX Beware of misnomers! */ static int usm_generate_out_msg(int msgProcModel, /* (UNUSED) */ u_char * globalData, /* IN */ /* * Pointer to msg header data will point to the beginning * * of the entire packet buffer to be transmitted on wire, * * memory will be contiguous with secParams, typically * * this pointer will be passed back as beginning of * * wholeMsg below. asn seq. length is updated w/ new length. * * * * While this points to a buffer that should be big enough * * for the whole message, only the first two parts * * of the message are completed, namely SNMPv3Message and * * HeaderData. globalDataLen (next parameter) represents * * the length of these two completed parts. */ size_t globalDataLen, /* IN - Length of msg header data. */ int maxMsgSize, /* (UNUSED) */ int secModel, /* (UNUSED) */ const u_char *secEngineID, /* IN - Pointer snmpEngineID. */ size_t secEngineIDLen, /* IN - SnmpEngineID length. */ const char *secName, /* IN - Pointer to securityName.*/ size_t secNameLen, /* IN - SecurityName length. */ int secLevel, /* IN - AuthNoPriv, authPriv etc. */ const u_char *scopedPdu, /* IN */ /* * Pointer to scopedPdu will be encrypted by USM if needed * * and written to packet buffer immediately following * * securityParameters, entire msg will be authenticated by * * USM if needed. */ size_t scopedPduLen, /* IN - scopedPdu length. */ const void *secStateRef, /* IN */ /* * secStateRef, pointer to cached info provided only for * * Response, otherwise NULL. 
*/ u_char * secParams, /* OUT */ /* * BER encoded securityParameters pointer to offset within * * packet buffer where secParams should be written, the * * entire BER encoded OCTET STRING (including header) is * * written here by USM secParams = globalData + * * globalDataLen. */ size_t * secParamsLen, /* IN/OUT - Len available, len returned. */ u_char ** wholeMsg, /* OUT */ /* * Complete authenticated/encrypted message - typically * * the pointer to start of packet buffer provided in * * globalData is returned here, could also be a separate * * buffer. */ size_t * wholeMsgLen) { /* IN/OUT - Len available, len returned. */ size_t otstlen; size_t seq_len; size_t msgAuthParmLen; size_t msgPrivParmLen; size_t msgSecParmLen; size_t authParamsOffset; size_t privParamsOffset; size_t datalen; size_t dataOffset; size_t theTotalLength; u_char *ptr; size_t ptr_len; size_t remaining; size_t offSet; u_int boots_uint; u_int time_uint; long boots_long; long time_long; /* * Indirection because secStateRef values override parameters. * * None of these are to be free'd - they are either pointing to * what's in the secStateRef or to something either in the * actual prarmeter list or the user list. */ const char *theName = NULL; u_int theNameLength = 0; const u_char *theEngineID = NULL; u_int theEngineIDLength = 0; u_char *theAuthKey = NULL; u_int theAuthKeyLength = 0; const oid *theAuthProtocol = NULL; u_int theAuthProtocolLength = 0; u_char *thePrivKey = NULL; u_int thePrivKeyLength = 0; const oid *thePrivProtocol = NULL; u_int thePrivProtocolLength = 0; int theSecLevel = 0; /* No defined const for bad * value (other then err). */ DEBUGMSGTL(("usm", "USM processing has begun.\n")); if (secStateRef != NULL) { /* * To hush the compiler for now. 
XXX */ const struct usmStateReference *ref = secStateRef; theName = ref->usr_name; theNameLength = ref->usr_name_length; theEngineID = ref->usr_engine_id; theEngineIDLength = ref->usr_engine_id_length; if (!theEngineIDLength) { theEngineID = secEngineID; theEngineIDLength = secEngineIDLen; } theAuthProtocol = ref->usr_auth_protocol; theAuthProtocolLength = ref->usr_auth_protocol_length; theAuthKey = ref->usr_auth_key; theAuthKeyLength = ref->usr_auth_key_length; thePrivProtocol = ref->usr_priv_protocol; thePrivProtocolLength = ref->usr_priv_protocol_length; thePrivKey = ref->usr_priv_key; thePrivKeyLength = ref->usr_priv_key_length; theSecLevel = ref->usr_sec_level; } /* * Identify the user record. */ else { struct usmUser *user; /* * we do allow an unknown user name for * unauthenticated requests. */ user = usm_get_user2(secEngineID, secEngineIDLen, secName, secNameLen); if (user == NULL && secLevel != SNMP_SEC_LEVEL_NOAUTH) { DEBUGMSGTL(("usm", "Unknown User(%s)\n", secName)); return SNMPERR_USM_UNKNOWNSECURITYNAME; } theName = secName; theNameLength = secNameLen; theEngineID = secEngineID; theSecLevel = secLevel; theEngineIDLength = secEngineIDLen; if (user) { theAuthProtocol = user->authProtocol; theAuthProtocolLength = user->authProtocolLen; theAuthKey = user->authKey; theAuthKeyLength = user->authKeyLen; thePrivProtocol = user->privProtocol; thePrivProtocolLength = user->privProtocolLen; thePrivKey = user->privKey; thePrivKeyLength = user->privKeyLen; } else { /* * unknown users can not do authentication (obviously) */ theAuthProtocol = usmNoAuthProtocol; theAuthProtocolLength = OID_LENGTH(usmNoAuthProtocol); theAuthKey = NULL; theAuthKeyLength = 0; thePrivProtocol = usmNoPrivProtocol; thePrivProtocolLength = OID_LENGTH(usmNoPrivProtocol); thePrivKey = NULL; thePrivKeyLength = 0; } } /* endif -- secStateRef==NULL */ /* * From here to the end of the function, avoid reference to * secName, secEngineID, secLevel, and associated lengths. 
*/ /* * Check to see if the user can use the requested sec services. */ if (usm_check_secLevel_vs_protocols(theSecLevel, theAuthProtocol, theAuthProtocolLength, thePrivProtocol, thePrivProtocolLength) == 1) { DEBUGMSGTL(("usm", "Unsupported Security Level (%d)\n", theSecLevel)); return SNMPERR_USM_UNSUPPORTEDSECURITYLEVEL; } /* * Retrieve the engine information. * * XXX No error is declared in the EoP when sending messages to * unknown engines, processing continues w/ boots/time == (0,0). */ if (get_enginetime(theEngineID, theEngineIDLength, &boots_uint, &time_uint, FALSE) == -1) { DEBUGMSGTL(("usm", "%s\n", "Failed to find engine data.")); } boots_long = boots_uint; time_long = time_uint; /* * Set up the Offsets. */ if (usm_calc_offsets(globalDataLen, theSecLevel, theEngineIDLength, theNameLength, scopedPduLen, boots_long, time_long, &theTotalLength, &authParamsOffset, &privParamsOffset, &dataOffset, &datalen, &msgAuthParmLen, &msgPrivParmLen, &otstlen, &seq_len, &msgSecParmLen) == -1) { DEBUGMSGTL(("usm", "Failed calculating offsets.\n")); return SNMPERR_USM_GENERICERROR; } /* * So, we have the offsets for the three parts that need to be * determined, and an overall length. Now we need to make * sure all of this would fit in the outgoing buffer, and * whether or not we need to make a new buffer, etc. */ /* * Set wholeMsg as a pointer to globalData. Sanity check for * the proper size. * * Mark workspace in the message with bytes of all 1's to make it * easier to find mistakes in raw message dumps. */ ptr = *wholeMsg = globalData; if (theTotalLength > *wholeMsgLen) { DEBUGMSGTL(("usm", "Message won't fit in buffer.\n")); return SNMPERR_USM_GENERICERROR; } ptr_len = *wholeMsgLen = theTotalLength; #ifdef NETSNMP_ENABLE_TESTING_CODE memset(&ptr[globalDataLen], 0xFF, theTotalLength - globalDataLen); #endif /* NETSNMP_ENABLE_TESTING_CODE */ /* * Do the encryption. 
*/ if (theSecLevel == SNMP_SEC_LEVEL_AUTHPRIV) { size_t encrypted_length = theTotalLength - dataOffset; size_t salt_length = BYTESIZE(USM_MAX_SALT_LENGTH); u_char salt[BYTESIZE(USM_MAX_SALT_LENGTH)]; int priv_type = sc_get_privtype(thePrivProtocol, thePrivProtocolLength); #ifdef HAVE_AES if (USM_CREATE_USER_PRIV_AES == (priv_type & USM_PRIV_MASK_ALG)) { if (!thePrivKey || usm_set_aes_iv(salt, &salt_length, htonl(boots_uint), htonl(time_uint), &ptr[privParamsOffset]) == -1) { DEBUGMSGTL(("usm", "Can't set AES iv.\n")); return SNMPERR_USM_GENERICERROR; } } #endif #ifndef NETSNMP_DISABLE_DES /* * XXX Hardwired to seek into a 1DES private key! */ if (USM_CREATE_USER_PRIV_DES == (priv_type & USM_PRIV_MASK_ALG)) { if (!thePrivKey || (usm_set_salt(salt, &salt_length, thePrivKey + 8, thePrivKeyLength - 8, &ptr[privParamsOffset]) == -1)) { DEBUGMSGTL(("usm", "Can't set DES-CBC salt.\n")); return SNMPERR_USM_GENERICERROR; } } #endif if (sc_encrypt(thePrivProtocol, thePrivProtocolLength, thePrivKey, thePrivKeyLength, salt, salt_length, scopedPdu, scopedPduLen, &ptr[dataOffset], &encrypted_length) != SNMP_ERR_NOERROR) { DEBUGMSGTL(("usm", "encryption error.\n")); return SNMPERR_USM_ENCRYPTIONERROR; } #ifdef NETSNMP_ENABLE_TESTING_CODE if (debug_is_token_registered("usm/dump") == SNMPERR_SUCCESS) { dump_chunk("usm/dump", "This data was encrypted:", scopedPdu, scopedPduLen); dump_chunk("usm/dump", "salt + Encrypted form:", salt, salt_length); dump_chunk("usm/dump", NULL, &ptr[dataOffset], encrypted_length); dump_chunk("usm/dump", "*wholeMsg:", *wholeMsg, theTotalLength); } #endif ptr = *wholeMsg; ptr_len = *wholeMsgLen = theTotalLength; /* * XXX Sanity check for salt length should be moved up * under usm_calc_offsets() or tossed. 
*/ if ((encrypted_length != (theTotalLength - dataOffset)) || (salt_length != msgPrivParmLen)) { DEBUGMSGTL(("usm", "encryption length error.\n")); return SNMPERR_USM_ENCRYPTIONERROR; } DEBUGMSGTL(("usm", "Encryption successful.\n")); } /* * No encryption for you! */ else { memcpy(&ptr[dataOffset], scopedPdu, scopedPduLen); } /* * Start filling in the other fields (in prep for authentication). * * offSet is an octet string header, which is different from all * the other headers. */ remaining = ptr_len - globalDataLen; offSet = ptr_len - remaining; asn_build_header(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), otstlen); offSet = ptr_len - remaining; asn_build_sequence(&ptr[offSet], &remaining, (u_char) (ASN_SEQUENCE | ASN_CONSTRUCTOR), seq_len); offSet = ptr_len - remaining; DEBUGDUMPHEADER("send", "msgAuthoritativeEngineID"); asn_build_string(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), theEngineID, theEngineIDLength); DEBUGINDENTLESS(); offSet = ptr_len - remaining; DEBUGDUMPHEADER("send", "msgAuthoritativeEngineBoots"); asn_build_int(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_INTEGER), &boots_long, sizeof(long)); DEBUGINDENTLESS(); offSet = ptr_len - remaining; DEBUGDUMPHEADER("send", "msgAuthoritativeEngineTime"); asn_build_int(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_INTEGER), &time_long, sizeof(long)); DEBUGINDENTLESS(); offSet = ptr_len - remaining; DEBUGDUMPHEADER("send", "msgUserName"); asn_build_string(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), (const u_char *) theName, theNameLength); DEBUGINDENTLESS(); /* * Note: if there is no authentication being done, * msgAuthParmLen is 0, and there is no effect (other than * inserting a zero-length header) of the following * statements. 
*/ offSet = ptr_len - remaining; asn_build_header(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), msgAuthParmLen); if (theSecLevel == SNMP_SEC_LEVEL_AUTHNOPRIV || theSecLevel == SNMP_SEC_LEVEL_AUTHPRIV) { offSet = ptr_len - remaining; memset(&ptr[offSet], 0, msgAuthParmLen); } remaining -= msgAuthParmLen; /* * Note: if there is no encryption being done, msgPrivParmLen * is 0, and there is no effect (other than inserting a * zero-length header) of the following statements. */ offSet = ptr_len - remaining; asn_build_header(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), msgPrivParmLen); remaining -= msgPrivParmLen; /* Skipping the IV already there. */ /* * For privacy, need to add the octet string header for it. */ if (theSecLevel == SNMP_SEC_LEVEL_AUTHPRIV) { offSet = ptr_len - remaining; asn_build_header(&ptr[offSet], &remaining, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), theTotalLength - dataOffset); } /* * Adjust overall length and store it as the first SEQ length * of the SNMPv3Message. * * FIX 4 is a magic number! */ remaining = theTotalLength; asn_build_sequence(ptr, &remaining, (u_char) (ASN_SEQUENCE | ASN_CONSTRUCTOR), theTotalLength - 4); /* * Now, time to consider / do authentication. */ if (theSecLevel == SNMP_SEC_LEVEL_AUTHNOPRIV || theSecLevel == SNMP_SEC_LEVEL_AUTHPRIV) { size_t temp_sig_len = msgAuthParmLen; u_char *temp_sig = (u_char *) malloc(temp_sig_len); if (temp_sig == NULL) { DEBUGMSGTL(("usm", "Out of memory.\n")); return SNMPERR_USM_GENERICERROR; } if (sc_generate_keyed_hash(theAuthProtocol, theAuthProtocolLength, theAuthKey, theAuthKeyLength, ptr, ptr_len, temp_sig, &temp_sig_len) != SNMP_ERR_NOERROR) { /* * FIX temp_sig_len defined?! 
*/ SNMP_ZERO(temp_sig, temp_sig_len); SNMP_FREE(temp_sig); DEBUGMSGTL(("usm", "Signing failed.\n")); return SNMPERR_USM_AUTHENTICATIONFAILURE; } if (temp_sig_len != msgAuthParmLen) { SNMP_ZERO(temp_sig, temp_sig_len); SNMP_FREE(temp_sig); DEBUGMSGTL(("usm", "Signing lengths failed.\n")); return SNMPERR_USM_AUTHENTICATIONFAILURE; } memcpy(&ptr[authParamsOffset], temp_sig, msgAuthParmLen); SNMP_ZERO(temp_sig, temp_sig_len); SNMP_FREE(temp_sig); } /* * endif -- create keyed hash */ DEBUGMSGTL(("usm", "USM processing completed.\n")); return SNMPERR_SUCCESS; } /* end usm_generate_out_msg() */ static int usm_secmod_generate_out_msg(struct snmp_secmod_outgoing_params *parms) { if (!parms) return SNMPERR_GENERR; return usm_generate_out_msg(parms->msgProcModel, parms->globalData, parms->globalDataLen, parms->maxMsgSize, parms->secModel, parms->secEngineID, parms->secEngineIDLen, parms->secName, parms->secNameLen, parms->secLevel, parms->scopedPdu, parms->scopedPduLen, parms->secStateRef, parms->secParams, parms->secParamsLen, parms->wholeMsg, parms->wholeMsgLen); } #ifdef NETSNMP_USE_REVERSE_ASNENCODING static int usm_rgenerate_out_msg(int msgProcModel, /* (UNUSED) */ u_char * globalData, /* IN */ /* * points at the msgGlobalData, which is of length given by next * parameter. */ size_t globalDataLen, /* IN - Length of msg header data. */ int maxMsgSize, /* (UNUSED) */ int secModel, /* (UNUSED) */ const u_char *secEngineID, /* IN - Pointer snmpEngineID.*/ size_t secEngineIDLen, /* IN - SnmpEngineID length. */ const char *secName, /* IN - Pointer to securityName.*/ size_t secNameLen, /* IN - SecurityName length. */ int secLevel, /* IN - AuthNoPriv, authPriv etc. */ const u_char *scopedPdu, /* IN */ /* * Pointer to scopedPdu will be encrypted by USM if needed * * and written to packet buffer immediately following * * securityParameters, entire msg will be authenticated by * * USM if needed. */ size_t scopedPduLen, /* IN - scopedPdu length. 
*/ const void *secStateRef, /* IN */ /* * secStateRef, pointer to cached info provided only for * * Response, otherwise NULL. */ u_char ** wholeMsg, /* IN/OUT */ /* * Points at the pointer to the packet buffer, which might get extended * if necessary via realloc(). */ size_t * wholeMsgLen, /* IN/OUT */ /* * Length of the entire packet buffer, **not** the length of the * packet. */ size_t * offset /* IN/OUT */ /* * Offset from the end of the packet buffer to the start of the packet, * also known as the packet length. */ ) { size_t msgAuthParmLen = 0; u_int boots_uint; u_int time_uint; long boots_long; long time_long; /* * Indirection because secStateRef values override parameters. * * None of these are to be free'd - they are either pointing to * what's in the secStateRef or to something either in the * actual parameter list or the user list. */ const char *theName = NULL; u_int theNameLength = 0; const u_char *theEngineID = NULL; u_int theEngineIDLength = 0; u_char *theAuthKey = NULL; u_int theAuthKeyLength = 0; const oid *theAuthProtocol = NULL; u_int theAuthProtocolLength = 0; u_char *thePrivKey = NULL; u_int thePrivKeyLength = 0; const oid *thePrivProtocol = NULL; u_int thePrivProtocolLength = 0; int theSecLevel = 0; /* No defined const for bad * value (other then err). */ size_t salt_length = 0, save_salt_length = 0; u_char salt[BYTESIZE(USM_MAX_SALT_LENGTH)]; u_char authParams[USM_MAX_AUTHSIZE]; u_char iv[BYTESIZE(USM_MAX_SALT_LENGTH)]; size_t sp_offset = 0, mac_offset = 0; int rc = 0; DEBUGMSGTL(("usm", "USM processing has begun (offset %d)\n", (int)*offset)); if (secStateRef != NULL) { /* * To hush the compiler for now. 
XXX */ const struct usmStateReference *ref = secStateRef; theName = ref->usr_name; theNameLength = ref->usr_name_length; theEngineID = ref->usr_engine_id; theEngineIDLength = ref->usr_engine_id_length; if (!theEngineIDLength) { theEngineID = secEngineID; theEngineIDLength = secEngineIDLen; } theAuthProtocol = ref->usr_auth_protocol; theAuthProtocolLength = ref->usr_auth_protocol_length; theAuthKey = ref->usr_auth_key; theAuthKeyLength = ref->usr_auth_key_length; thePrivProtocol = ref->usr_priv_protocol; thePrivProtocolLength = ref->usr_priv_protocol_length; thePrivKey = ref->usr_priv_key; thePrivKeyLength = ref->usr_priv_key_length; theSecLevel = ref->usr_sec_level; } /* * * Identify the user record. */ else { struct usmUser *user; /* * we do allow an unknown user name for * unauthenticated requests. */ user = usm_get_user2(secEngineID, secEngineIDLen, secName, secNameLen); if (user == NULL && secLevel != SNMP_SEC_LEVEL_NOAUTH) { DEBUGMSGTL(("usm", "Unknown User\n")); return SNMPERR_USM_UNKNOWNSECURITYNAME; } theName = secName; theNameLength = secNameLen; theEngineID = secEngineID; theSecLevel = secLevel; theEngineIDLength = secEngineIDLen; if (user) { theAuthProtocol = user->authProtocol; theAuthProtocolLength = user->authProtocolLen; theAuthKey = user->authKey; theAuthKeyLength = user->authKeyLen; thePrivProtocol = user->privProtocol; thePrivProtocolLength = user->privProtocolLen; thePrivKey = user->privKey; thePrivKeyLength = user->privKeyLen; } else { /* * unknown users can not do authentication (obviously) */ theAuthProtocol = usmNoAuthProtocol; theAuthProtocolLength = OID_LENGTH(usmNoAuthProtocol); theAuthKey = NULL; theAuthKeyLength = 0; thePrivProtocol = usmNoPrivProtocol; thePrivProtocolLength = OID_LENGTH(usmNoPrivProtocol); thePrivKey = NULL; thePrivKeyLength = 0; } } /* endif -- secStateRef==NULL */ /* * From here to the end of the function, avoid reference to * secName, secEngineID, secLevel, and associated lengths. 
*/ /* * Check to see if the user can use the requested sec services. */ if (usm_check_secLevel_vs_protocols(theSecLevel, theAuthProtocol, theAuthProtocolLength, thePrivProtocol, thePrivProtocolLength) == 1) { DEBUGMSGTL(("usm", "Unsupported Security Level or type (%d)\n", theSecLevel)); return SNMPERR_USM_UNSUPPORTEDSECURITYLEVEL; } /* * * Retrieve the engine information. * * * * XXX No error is declared in the EoP when sending messages to * * unknown engines, processing continues w/ boots/time == (0,0). */ if (get_enginetime(theEngineID, theEngineIDLength, &boots_uint, &time_uint, FALSE) == -1) { DEBUGMSGTL(("usm", "%s\n", "Failed to find engine data.")); } boots_long = boots_uint; time_long = time_uint; if (theSecLevel == SNMP_SEC_LEVEL_AUTHPRIV) { /* * Initially assume that the ciphertext will end up the same size as * the plaintext plus some padding. Really sc_encrypt ought to be able * to grow this for us, a la asn_realloc_rbuild_<type> functions, but * this will do for now. */ u_char *ciphertext = NULL; size_t ciphertextlen = scopedPduLen + 64; int priv_type = sc_get_privtype(thePrivProtocol, thePrivProtocolLength); if ((ciphertext = (u_char *) malloc(ciphertextlen)) == NULL) { DEBUGMSGTL(("usm", "couldn't malloc %d bytes for encrypted PDU\n", (int)ciphertextlen)); return SNMPERR_MALLOC; } /* * XXX Hardwired to seek into a 1DES private key! 
*/ #ifdef HAVE_AES if (USM_CREATE_USER_PRIV_AES == (priv_type & USM_PRIV_MASK_ALG)) { salt_length = BYTESIZE(USM_AES_SALT_LENGTH); save_salt_length = BYTESIZE(USM_AES_SALT_LENGTH)/2; if (!thePrivKey || usm_set_aes_iv(salt, &salt_length, htonl(boots_uint), htonl(time_uint), iv) == -1) { DEBUGMSGTL(("usm", "Can't set AES iv.\n")); SNMP_FREE(ciphertext); return SNMPERR_USM_GENERICERROR; } } #endif #ifndef NETSNMP_DISABLE_DES if (USM_CREATE_USER_PRIV_DES == (priv_type & USM_PRIV_MASK_ALG)) { salt_length = BYTESIZE(USM_DES_SALT_LENGTH); save_salt_length = BYTESIZE(USM_DES_SALT_LENGTH); if (!thePrivKey || (usm_set_salt(salt, &salt_length, thePrivKey + 8, thePrivKeyLength - 8, iv) == -1)) { DEBUGMSGTL(("usm", "Can't set DES-CBC salt.\n")); SNMP_FREE(ciphertext); return SNMPERR_USM_GENERICERROR; } } #endif #ifdef NETSNMP_ENABLE_TESTING_CODE if (debug_is_token_registered("usm/dump") == SNMPERR_SUCCESS) { dump_chunk("usm/dump", "This data was encrypted:", scopedPdu, scopedPduLen); } #endif if (sc_encrypt(thePrivProtocol, thePrivProtocolLength, thePrivKey, thePrivKeyLength, salt, salt_length, scopedPdu, scopedPduLen, ciphertext, &ciphertextlen) != SNMP_ERR_NOERROR) { DEBUGMSGTL(("usm", "encryption error.\n")); SNMP_FREE(ciphertext); return SNMPERR_USM_ENCRYPTIONERROR; } /* * Write the encrypted scopedPdu back into the packet buffer. 
*/ *offset = 0; rc = asn_realloc_rbuild_string(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), ciphertext, ciphertextlen); if (rc == 0) { DEBUGMSGTL(("usm", "Encryption failed.\n")); SNMP_FREE(ciphertext); return SNMPERR_USM_ENCRYPTIONERROR; } #ifdef NETSNMP_ENABLE_TESTING_CODE if (debug_is_token_registered("usm/dump") == SNMPERR_SUCCESS) { dump_chunk("usm/dump", "salt + Encrypted form: ", salt, salt_length); dump_chunk("usm/dump", "wholeMsg:", (*wholeMsg + *wholeMsgLen - *offset), *offset); } #endif DEBUGMSGTL(("usm", "Encryption successful.\n")); SNMP_FREE(ciphertext); } else { /* * theSecLevel != SNMP_SEC_LEVEL_AUTHPRIV */ } /* * Start encoding the msgSecurityParameters. */ sp_offset = *offset; DEBUGDUMPHEADER("send", "msgPrivacyParameters"); /* * msgPrivacyParameters (warning: assumes DES salt). */ rc = asn_realloc_rbuild_string(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), iv, save_salt_length); DEBUGINDENTLESS(); if (rc == 0) { DEBUGMSGTL(("usm", "building privParams failed.\n")); return SNMPERR_TOO_LONG; } DEBUGDUMPHEADER("send", "msgAuthenticationParameters"); /* * msgAuthenticationParameters. */ if (theSecLevel == SNMP_SEC_LEVEL_AUTHNOPRIV || theSecLevel == SNMP_SEC_LEVEL_AUTHPRIV) { memset(authParams, 0, sizeof(authParams)); msgAuthParmLen = sc_get_auth_maclen(sc_get_authtype(theAuthProtocol, theAuthProtocolLength)); } rc = asn_realloc_rbuild_string(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), authParams, msgAuthParmLen); DEBUGINDENTLESS(); if (rc == 0) { DEBUGMSGTL(("usm", "building authParams failed.\n")); return SNMPERR_TOO_LONG; } /* * Remember where to put the actual HMAC we calculate later on. An * encoded OCTET STRING of length USM_MD5_AND_SHA_AUTH_LEN has an ASN.1 * header of length 2, hence the fudge factor. This works as long as * auth lengths stay < 127. */ mac_offset = *offset - 2; /* * msgUserName. 
*/ DEBUGDUMPHEADER("send", "msgUserName"); rc = asn_realloc_rbuild_string(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), (const u_char *) theName, theNameLength); DEBUGINDENTLESS(); if (rc == 0) { DEBUGMSGTL(("usm", "building authParams failed.\n")); return SNMPERR_TOO_LONG; } /* * msgAuthoritativeEngineTime. */ DEBUGDUMPHEADER("send", "msgAuthoritativeEngineTime"); rc = asn_realloc_rbuild_int(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_INTEGER), &time_long, sizeof(long)); DEBUGINDENTLESS(); if (rc == 0) { DEBUGMSGTL(("usm", "building msgAuthoritativeEngineTime failed.\n")); return SNMPERR_TOO_LONG; } /* * msgAuthoritativeEngineBoots. */ DEBUGDUMPHEADER("send", "msgAuthoritativeEngineBoots"); rc = asn_realloc_rbuild_int(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_INTEGER), &boots_long, sizeof(long)); DEBUGINDENTLESS(); if (rc == 0) { DEBUGMSGTL(("usm", "building msgAuthoritativeEngineBoots failed.\n")); return SNMPERR_TOO_LONG; } DEBUGDUMPHEADER("send", "msgAuthoritativeEngineID"); rc = asn_realloc_rbuild_string(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), theEngineID, theEngineIDLength); DEBUGINDENTLESS(); if (rc == 0) { DEBUGMSGTL(("usm", "building msgAuthoritativeEngineID failed.\n")); return SNMPERR_TOO_LONG; } /* * USM msgSecurityParameters sequence header */ rc = asn_realloc_rbuild_sequence(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_SEQUENCE | ASN_CONSTRUCTOR), *offset - sp_offset); if (rc == 0) { DEBUGMSGTL(("usm", "building usm security parameters failed.\n")); return SNMPERR_TOO_LONG; } /* * msgSecurityParameters OCTET STRING wrapper. 
*/ rc = asn_realloc_rbuild_header(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR), *offset - sp_offset); if (rc == 0) { DEBUGMSGTL(("usm", "building msgSecurityParameters failed.\n")); return SNMPERR_TOO_LONG; } /* * Copy in the msgGlobalData and msgVersion. */ while ((*wholeMsgLen - *offset) < globalDataLen) { if (!asn_realloc(wholeMsg, wholeMsgLen)) { DEBUGMSGTL(("usm", "building global data failed.\n")); return SNMPERR_TOO_LONG; } } *offset += globalDataLen; memcpy(*wholeMsg + *wholeMsgLen - *offset, globalData, globalDataLen); /* * Total packet sequence. */ rc = asn_realloc_rbuild_sequence(wholeMsg, wholeMsgLen, offset, 1, (u_char) (ASN_SEQUENCE | ASN_CONSTRUCTOR), *offset); if (rc == 0) { DEBUGMSGTL(("usm", "building master packet sequence failed.\n")); return SNMPERR_TOO_LONG; } /* * Now consider / do authentication. */ if (theSecLevel == SNMP_SEC_LEVEL_AUTHNOPRIV || theSecLevel == SNMP_SEC_LEVEL_AUTHPRIV) { size_t temp_sig_len = msgAuthParmLen; u_char *temp_sig = (u_char *) malloc(temp_sig_len); u_char *proto_msg = *wholeMsg + *wholeMsgLen - *offset; size_t proto_msg_len = *offset; if (temp_sig == NULL) { DEBUGMSGTL(("usm", "Out of memory.\n")); return SNMPERR_USM_GENERICERROR; } if (sc_generate_keyed_hash(theAuthProtocol, theAuthProtocolLength, theAuthKey, theAuthKeyLength, proto_msg, proto_msg_len, temp_sig, &temp_sig_len) != SNMP_ERR_NOERROR) { SNMP_FREE(temp_sig); DEBUGMSGTL(("usm", "Signing failed.\n")); return SNMPERR_USM_AUTHENTICATIONFAILURE; } if (temp_sig_len != msgAuthParmLen) { SNMP_FREE(temp_sig); DEBUGMSGTL(("usm", "Signing lengths failed.\n")); return SNMPERR_USM_AUTHENTICATIONFAILURE; } memcpy(*wholeMsg + *wholeMsgLen - mac_offset, temp_sig, msgAuthParmLen); SNMP_FREE(temp_sig); } /* * endif -- create keyed hash */ DEBUGMSGTL(("usm", "USM processing completed.\n")); return SNMPERR_SUCCESS; } /* end usm_rgenerate_out_msg() */ static int usm_secmod_rgenerate_out_msg(struct snmp_secmod_outgoing_params 
*parms)
{
    /*
     * Thin security-module dispatcher: unpack the outgoing-parameter
     * struct and forward everything to usm_rgenerate_out_msg().
     */
    if (!parms)
        return SNMPERR_GENERR;
    return usm_rgenerate_out_msg(parms->msgProcModel,
                                 parms->globalData,
                                 parms->globalDataLen,
                                 parms->maxMsgSize,
                                 parms->secModel,
                                 parms->secEngineID,
                                 parms->secEngineIDLen,
                                 parms->secName,
                                 parms->secNameLen,
                                 parms->secLevel,
                                 parms->scopedPdu,
                                 parms->scopedPduLen,
                                 parms->secStateRef,
                                 parms->wholeMsg,
                                 parms->wholeMsgLen,
                                 parms->wholeMsgOffset);
}
#endif                          /* presumably NETSNMP_USE_REVERSE_ASNENCODING -- TODO confirm matching #ifdef */

/*******************************************************************-o-******
 * usm_parse_security_parameters
 *
 * Parameters:
 *	(See list below...)
 *
 * Returns:
 *	 0	On success,
 *	-1	On parse error,
 *	-2	On parse error within the privacy parameters (reported by
 *		the caller as a decryptionError).
 *
 * tab stop 4
 *
 * Extracts values from the BER-encoded msgSecurityParameters OCTET STRING
 * of an incoming message: authoritative engine ID, boots and time, the
 * user name, the authentication signature (which is zeroed in place so the
 * HMAC can be recomputed later), and the privacy salt.  On return,
 * *data_ptr points at the first byte following the security parameters,
 * i.e. the start of the msgData section.
 */
static int
usm_parse_security_parameters(u_char * secParams,
                              size_t remaining,
                              u_char * secEngineID,
                              size_t * secEngineIDLen,
                              u_int * boots_uint,
                              u_int * time_uint,
                              char *secName,
                              size_t * secNameLen,
                              u_char * signature,
                              size_t * signature_length,
                              u_char * salt,
                              size_t * salt_length, u_char ** data_ptr)
{
    u_char         *parse_ptr = secParams;
    u_char         *value_ptr;
    u_char         *next_ptr;
    u_char          type_value;

    size_t          octet_string_length = remaining;
    size_t          sequence_length;
    size_t          remaining_bytes;

    long            boots_long;
    long            time_long;

    u_int           origNameLen;

    /*
     * Eat the first octet header (the OCTET STRING wrapping the whole
     * security-parameter sequence).
     */
    if ((value_ptr = asn_parse_sequence(parse_ptr, &octet_string_length,
                                        &type_value,
                                        (ASN_UNIVERSAL | ASN_PRIMITIVE |
                                         ASN_OCTET_STR),
                                        "usm first octet")) == NULL) {
        /*
         * RETURN parse error
         */ return -1;
    }

    /*
     * Eat the sequence header.
     */
    parse_ptr = value_ptr;
    sequence_length = octet_string_length;

    if ((value_ptr = asn_parse_sequence(parse_ptr, &sequence_length,
                                        &type_value,
                                        (ASN_SEQUENCE | ASN_CONSTRUCTOR),
                                        "usm sequence")) == NULL) {
        /*
         * RETURN parse error
         */ return -1;
    }

    /*
     * Retrieve the engineID.
     */
    parse_ptr = value_ptr;
    remaining_bytes = sequence_length;

    DEBUGDUMPHEADER("recv", "msgAuthoritativeEngineID");
    if ((next_ptr
         = asn_parse_string(parse_ptr, &remaining_bytes, &type_value,
                            secEngineID, secEngineIDLen)) == NULL) {
        DEBUGINDENTLESS();
        /*
         * RETURN parse error
         */ return -1;
    }
    DEBUGINDENTLESS();

    if (type_value !=
        (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR)) {
        /*
         * RETURN parse error
         */ return -1;
    }

    /*
     * Retrieve the engine boots, notice switch in the way next_ptr and
     * remaining_bytes are used (to accomodate the asn code).
     */
    DEBUGDUMPHEADER("recv", "msgAuthoritativeEngineBoots");
    if ((next_ptr = asn_parse_int(next_ptr, &remaining_bytes, &type_value,
                                  &boots_long, sizeof(long))) == NULL) {
        DEBUGINDENTLESS();
        /*
         * RETURN parse error
         */ return -1;
    }
    DEBUGINDENTLESS();

    if (type_value !=
        (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_INTEGER)) {
        DEBUGINDENTLESS();
        /*
         * RETURN parse error
         */ return -1;
    }
    *boots_uint = (u_int) boots_long;

    /*
     * Retrieve the time value.
     */
    DEBUGDUMPHEADER("recv", "msgAuthoritativeEngineTime");
    if ((next_ptr = asn_parse_int(next_ptr, &remaining_bytes, &type_value,
                                  &time_long, sizeof(long))) == NULL) {
        /*
         * RETURN parse error
         */ return -1;
    }
    DEBUGINDENTLESS();

    if (type_value !=
        (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_INTEGER)) {
        /*
         * RETURN parse error
         */ return -1;
    }
    *time_uint = (u_int) time_long;

    /* Reject boots/time values outside the ranges allowed by RFC 3414. */
    if (*boots_uint > ENGINEBOOT_MAX || *time_uint > ENGINETIME_MAX) {
        return -1;
    }

    /*
     * Retrieve the secName.  Remember the caller-supplied capacity so the
     * NUL terminator appended below is known to fit.
     */
    origNameLen = *secNameLen;

    DEBUGDUMPHEADER("recv", "msgUserName");
    if ((next_ptr
         = asn_parse_string(next_ptr, &remaining_bytes, &type_value,
                            (u_char *) secName, secNameLen)) == NULL) {
        DEBUGINDENTLESS();
        /*
         * RETURN parse error
         */ return -1;
    }
    DEBUGINDENTLESS();

    /*
     * FIX -- doesn't this also indicate a buffer overrun?
     */
    if (origNameLen < *secNameLen + 1) {
        /*
         * RETURN parse error, but it's really a parameter error
         */
        return -1;
    }

    if (*secNameLen > 32) {
        /*
         * This is a USM-specific limitation over and above the above
         * limitation (which will probably default to the length of an
         * SnmpAdminString, i.e. 255).  See RFC 2574, sec. 2.4.
         */
        return -1;
    }

    secName[*secNameLen] = '\0';

    if (type_value !=
        (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR)) {
        /*
         * RETURN parse error
         */ return -1;
    }

    /*
     * Retrieve the signature and blank it if there.
     */
    DEBUGDUMPHEADER("recv", "msgAuthenticationParameters");
    if ((next_ptr
         = asn_parse_string(next_ptr, &remaining_bytes, &type_value,
                            signature, signature_length)) == NULL) {
        DEBUGINDENTLESS();
        /*
         * RETURN parse error
         */ return -1;
    }
    DEBUGINDENTLESS();

    if (type_value !=
        (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR)) {
        /*
         * RETURN parse error
         */ return -1;
    }

    if (*signature_length != 0) {       /* Blanking for authentication step later */
        memset(next_ptr - (u_long) * signature_length,
               0, *signature_length);
    }

    /*
     * Retrieve the salt.
     *
     * Note that the next ptr is where the data section starts.
     */
    DEBUGDUMPHEADER("recv", "msgPrivacyParameters");
    if ((*data_ptr
         = asn_parse_string(next_ptr, &remaining_bytes, &type_value,
                            salt, salt_length)) == NULL) {
        DEBUGINDENTLESS();
        /*
         * RETURN parse error (reported as a decryptionError)
         */ return -2;
    }
    DEBUGINDENTLESS();

    if (type_value !=
        (u_char) (ASN_UNIVERSAL | ASN_PRIMITIVE | ASN_OCTET_STR)) {
        /*
         * RETURN parse error (reported as a decryptionError)
         */ return -2;
    }

    return 0;

}                               /* end usm_parse_security_parameters() */

/*******************************************************************-o-******
 * usm_check_and_update_timeliness
 *
 * Parameters:
 *	*secEngineID
 *	 secEngineIDLen
 *	 boots_uint
 *	 time_uint
 *	*error
 *
 * Returns:
 *	0	On success,
 *	-1	Otherwise.
 *
 *
 * Performs the incoming timeliness checking and setting.
 */
static int
usm_check_and_update_timeliness(u_char * secEngineID,
                                size_t secEngineIDLen,
                                u_int boots_uint,
                                u_int time_uint, int *error)
{
    /*
     * Validate the (boots, time) pair of an incoming message per the RFC
     * 3414 time-window rules: against our own engine clock when we are the
     * authoritative engine, otherwise against the cached view of the
     * remote engine's clock (which is advanced when the message proves
     * fresher).  Returns 0 and *error = SNMPERR_SUCCESS when timely;
     * returns -1 with *error set otherwise.
     */
    u_char          myID[USM_MAX_ID_LENGTH];
    u_long          myIDLength =
        snmpv3_get_engineID(myID, USM_MAX_ID_LENGTH);
    u_int           myBoots;
    u_int           myTime;

    if ((myIDLength > USM_MAX_ID_LENGTH) || (myIDLength == 0)) {
        /*
         * We're probably already screwed...buffer overwrite.  XXX?
         */
        DEBUGMSGTL(("usm", "Buffer overflow.\n"));
        *error = SNMPERR_USM_GENERICERROR;
        return -1;
    }

    myBoots = snmpv3_local_snmpEngineBoots();
    myTime = snmpv3_local_snmpEngineTime();

    /*
     * IF the time involved is local
     *     Make sure message is inside the time window
     * ELSE
     *     IF boots is higher or boots is the same and time is higher
     *         remember this new data
     *     ELSE
     *         IF !(boots same and time within USM_TIME_WINDOW secs)
     *             Message is too old
     *         ELSE
     *             Message is ok, but don't take time
     *         ENDIF
     *     ENDIF
     * ENDIF
     */

    /*
     * This is a local reference.
     */
    if (secEngineIDLen == myIDLength
        && memcmp(secEngineID, myID, myIDLength) == 0) {
        u_int           time_difference = myTime > time_uint ?
            myTime - time_uint : time_uint - myTime;

        if (boots_uint == ENGINEBOOT_MAX
            || boots_uint != myBoots
            || time_difference > USM_TIME_WINDOW) {
            snmp_increment_statistic(STAT_USMSTATSNOTINTIMEWINDOWS);

            DEBUGMSGTL(("usm",
                        "boot_uint %u myBoots %u time_diff %u => not in time window\n",
                        boots_uint, myBoots, time_difference));
            *error = SNMPERR_USM_NOTINTIMEWINDOW;
            return -1;
        }

        *error = SNMPERR_SUCCESS;
        return 0;
    }

    /*
     * This is a remote reference.
     */
    else {
        u_int           theirBoots, theirTime, theirLastTime;
        u_int           time_difference;

        if (get_enginetime_ex(secEngineID, secEngineIDLen,
                              &theirBoots, &theirTime,
                              &theirLastTime, TRUE)
            != SNMPERR_SUCCESS) {
            DEBUGMSGTL(("usm", "%s\n",
                        "Failed to get remote engine's times."));

            *error = SNMPERR_USM_GENERICERROR;
            return -1;
        }

        time_difference = theirTime > time_uint ?
            theirTime - time_uint : time_uint - theirTime;

        /*
         * XXX  Contrary to the pseudocode:
         *      See if boots is invalid first.
         */
        if (theirBoots == ENGINEBOOT_MAX || theirBoots > boots_uint) {
            DEBUGMSGTL(("usm", "%s\n", "Remote boot count invalid."));

            *error = SNMPERR_USM_NOTINTIMEWINDOW;
            return -1;
        }

        /*
         * Boots is ok, see if the boots is the same but the time
         * is old.
         */
        if (theirBoots == boots_uint && time_uint < theirLastTime) {
            if (time_difference > USM_TIME_WINDOW) {
                DEBUGMSGTL(("usm", "%s\n", "Message too old."));
                *error = SNMPERR_USM_NOTINTIMEWINDOW;
                return -1;
            }

            else {              /* Old, but acceptable */
                *error = SNMPERR_SUCCESS;
                return 0;
            }
        }

        /*
         * Message is ok, either boots has been advanced, or
         * time is greater than before with the same boots.
         */
        if (set_enginetime(secEngineID, secEngineIDLen,
                           boots_uint, time_uint, TRUE)
            != SNMPERR_SUCCESS) {
            DEBUGMSGTL(("usm", "%s\n",
                        "Failed updating remote boot/time."));
            *error = SNMPERR_USM_GENERICERROR;
            return -1;
        }

        *error = SNMPERR_SUCCESS;
        return 0;               /* Fresh message and time updated */

    }                           /* endif -- local or remote time reference. */

}                               /* end usm_check_and_update_timeliness() */

/*******************************************************************-o-******
 * usm_check_secLevel
 *
 * Parameters:
 *	 level
 *	*user
 *
 * Returns:
 *	 0	If the requested security level is acceptable for the user,
 *	 1	If the user's configured protocols cannot support it,
 *	-1	If the user is not active.
 *
 * Checks that a given security level is valid for a given user.
 */
static int
usm_check_secLevel(int level, struct usmUser *user)
{
    /*
     * Verify that the (active) user has auth/priv protocols configured
     * that can satisfy the requested security level.  Returns 0 when
     * acceptable, 1 when the protocols cannot support the level, and -1
     * for an inactive user.
     */
    if (user->userStatus != RS_ACTIVE)
        return -1;

    DEBUGMSGTL(("comparex", "Comparing: %" NETSNMP_PRIo "u %" NETSNMP_PRIo
                "u ", usmNoPrivProtocol[0], usmNoPrivProtocol[1]));
    DEBUGMSGOID(("comparex", usmNoPrivProtocol,
                 OID_LENGTH(usmNoPrivProtocol)));
    DEBUGMSG(("comparex", "\n"));

    /* Privacy requested, but the user has no privacy protocol. */
    if (level == SNMP_SEC_LEVEL_AUTHPRIV
        && (netsnmp_oid_equals(user->privProtocol, user->privProtocolLen,
                               usmNoPrivProtocol,
                               OID_LENGTH(usmNoPrivProtocol)) == 0)) {
        DEBUGMSGTL(("usm", "Level: %d\n", level));
        DEBUGMSGTL(("usm", "User (%s) Auth Protocol: ", user->name));
        DEBUGMSGOID(("usm", user->authProtocol, user->authProtocolLen));
        DEBUGMSG(("usm", ", User Priv Protocol: "));
        DEBUGMSGOID(("usm", user->privProtocol, user->privProtocolLen));
        DEBUGMSG(("usm", "\n"));
        return 1;
    }

    /* Authentication requested, but the user has no auth protocol. */
    if ((level == SNMP_SEC_LEVEL_AUTHPRIV
         || level == SNMP_SEC_LEVEL_AUTHNOPRIV)
        && (netsnmp_oid_equals
            (user->authProtocol, user->authProtocolLen,
             usmNoAuthProtocol, OID_LENGTH(usmNoAuthProtocol)) == 0)) {
        DEBUGMSGTL(("usm", "Level: %d\n", level));
        DEBUGMSGTL(("usm", "User (%s) Auth Protocol: ", user->name));
        DEBUGMSGOID(("usm", user->authProtocol, user->authProtocolLen));
        DEBUGMSG(("usm", ", User Priv Protocol: "));
        DEBUGMSGOID(("usm", user->privProtocol, user->privProtocolLen));
        DEBUGMSG(("usm", "\n"));
        return 1;
    }

    return 0;

}                               /* end usm_check_secLevel() */

/*******************************************************************-o-******
 * usm_process_in_msg
 *
 * Parameters:
 *	(See list below...)
 *
 * Returns:
 *	SNMPERR_SUCCESS			On success.
 *	SNMPERR_USM_AUTHENTICATIONFAILURE
 *	SNMPERR_USM_DECRYPTIONERROR
 *	SNMPERR_USM_GENERICERROR
 *	SNMPERR_USM_PARSEERROR
 *	SNMPERR_USM_UNKNOWNENGINEID
 *	SNMPERR_USM_PARSEERROR
 *	SNMPERR_USM_UNKNOWNSECURITYNAME
 *	SNMPERR_USM_UNSUPPORTEDSECURITYLEVEL
 *
 *
 * ASSUMES size of decrypt_buf will always be >= size of encrypted sPDU.
*/
/*
 * Incoming-message processing per RFC 3414 section 3.2: parse the
 * security parameters, locate engine/user records, verify the keyed
 * hash, check timeliness, and (if authPriv) decrypt the scoped PDU.
 * On any failure the cached state reference is released and an
 * SNMPERR_USM_* code is returned via the single `err:` exit.
 */
static int
usm_process_in_msg(int msgProcModel,    /* (UNUSED) */
                   size_t maxMsgSize,   /* IN - Used to calc maxSizeResponse. */
                   u_char * secParams,  /* IN - BER encoded securityParameters. */
                   int secModel,        /* (UNUSED) */
                   int secLevel,        /* IN - AuthNoPriv, authPriv etc. */
                   u_char * wholeMsg,   /* IN - Original v3 message. */
                   size_t wholeMsgLen,  /* IN - Msg length. */
                   u_char * secEngineID,        /* OUT - Pointer snmpEngineID. */
                   size_t * secEngineIDLen,     /* IN/OUT - Len available, len returned. */
                   /*
                    * NOTE: Memory provided by caller.
                    */
                   char *secName,       /* OUT - Pointer to securityName. */
                   size_t * secNameLen, /* IN/OUT - Len available, len returned. */
                   u_char ** scopedPdu, /* OUT - Pointer to plaintext scopedPdu. */
                   size_t * scopedPduLen,       /* IN/OUT - Len available, len returned. */
                   size_t * maxSizeResponse,    /* OUT - Max size of Response PDU. */
                   void **secStateRf,   /* OUT - Ref to security state. */
                   netsnmp_session * sess,      /* IN - session which got the message */
                   u_char msg_flags)
{                               /* IN - v3 Message flags.
                                 */
    /*
     * Bytes left in wholeMsg from the start of the security parameters.
     * NOTE(review): the `*` in the casts below looks like an indent
     * artifact of pointer-offset arithmetic — verify against upstream.
     */
    size_t          remaining = wholeMsgLen - (u_int)
        ((u_long) * secParams - (u_long) * wholeMsg);
    u_int           boots_uint;
    u_int           time_uint;
#ifdef HAVE_AES
    u_int           net_boots, net_time;
#endif
#ifndef NETSNMP_DISABLE_DES
    int             i;
#endif
    u_char          signature[USM_MAX_AUTHSIZE];
    size_t          signature_length = USM_MAX_AUTHSIZE;
    u_char          salt[BYTESIZE(USM_MAX_SALT_LENGTH)];
    size_t          salt_length = BYTESIZE(USM_MAX_SALT_LENGTH);
    u_char          iv[BYTESIZE(USM_MAX_SALT_LENGTH)];
    u_int           iv_length = BYTESIZE(USM_MAX_SALT_LENGTH);
    u_char         *data_ptr;
    u_char         *value_ptr;
    u_char          type_value;
    u_char         *end_of_overhead = NULL;
    int             error;
    int             rc = 0;
    struct usmStateReference **secStateRef =
        (struct usmStateReference **) secStateRf;

    struct usmUser *user;

    DEBUGMSGTL(("usm", "USM processing begun...\n"));

    netsnmp_assert(secStateRef);
    /* Release any stale reference before allocating a fresh one. */
    usm_free_usmStateReference(*secStateRef);
    *secStateRef = usm_malloc_usmStateReference();
    if (*secStateRef == NULL) {
        DEBUGMSGTL(("usm", "Out of memory.\n"));
        return SNMPERR_USM_GENERICERROR;
    }

    /*
     * Make sure the *secParms is an OCTET STRING.
     * Extract the user name, engine ID, and security level.
     */
    if ((rc = usm_parse_security_parameters(secParams, remaining,
                                            secEngineID, secEngineIDLen,
                                            &boots_uint, &time_uint,
                                            secName, secNameLen,
                                            signature, &signature_length,
                                            salt, &salt_length,
                                            &data_ptr)) < 0) {
        DEBUGMSGTL(("usm", "Parsing failed (rc %d).\n", rc));
        if (rc == -2) {
            /*
             * This indicates a decryptionError.
             */
            snmp_increment_statistic(STAT_USMSTATSDECRYPTIONERRORS);
            error = SNMPERR_USM_DECRYPTIONERROR;
        } else {
            snmp_increment_statistic(STAT_SNMPINASNPARSEERRS);
            error = SNMPERR_USM_PARSEERROR;
        }
        goto err;
    }

    /*
     * RFC 2574 section 8.3.2
     * 1)  If the privParameters field is not an 8-octet OCTET STRING,
     * then an error indication (decryptionError) is returned to the
     * calling module.
     */
    if ((secLevel == SNMP_SEC_LEVEL_AUTHPRIV) && (salt_length != 8)) {
        snmp_increment_statistic(STAT_USMSTATSDECRYPTIONERRORS);
        error = SNMPERR_USM_DECRYPTIONERROR;
        goto err;
    }

    if (secLevel != SNMP_SEC_LEVEL_AUTHPRIV) {
        /*
         * pull these out now so reports can use them
         */
        *scopedPdu = data_ptr;
        *scopedPduLen = wholeMsgLen - (data_ptr - wholeMsg);
        end_of_overhead = data_ptr;
    }

    /*
     * Cache the name, engine ID, and security level,
     * per step 2 (section 3.2)
     */
    if (usm_set_usmStateReference_name
        (*secStateRef, secName, *secNameLen) == -1) {
        DEBUGMSGTL(("usm", "%s\n", "Couldn't cache name."));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    if (usm_set_usmStateReference_engine_id
        (*secStateRef, secEngineID, *secEngineIDLen) == -1) {
        DEBUGMSGTL(("usm", "%s\n", "Couldn't cache engine id."));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    if (usm_set_usmStateReference_sec_level(*secStateRef, secLevel) == -1) {
        DEBUGMSGTL(("usm", "%s\n", "Couldn't cache security level."));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    /*
     * Locate the engine ID record.
     * If it is unknown, then either create one or note this as an error.
     *
     * Authoritative receivers (or reportable messages) require a known
     * engine; otherwise an engine record is created on the fly.
     */
    if ((sess && (sess->isAuthoritative == SNMP_SESS_AUTHORITATIVE ||
                  (sess->isAuthoritative == SNMP_SESS_UNKNOWNAUTH &&
                   (msg_flags & SNMP_MSG_FLAG_RPRT_BIT)))) ||
        (!sess && (msg_flags & SNMP_MSG_FLAG_RPRT_BIT))) {
        if (ISENGINEKNOWN(secEngineID, *secEngineIDLen) == FALSE) {
            DEBUGMSGTL(("usm", "Unknown Engine ID.\n"));
            snmp_increment_statistic(STAT_USMSTATSUNKNOWNENGINEIDS);
            error = SNMPERR_USM_UNKNOWNENGINEID;
            goto err;
        }
    } else {
        if (ENSURE_ENGINE_RECORD(secEngineID, *secEngineIDLen)
            != SNMPERR_SUCCESS) {
            DEBUGMSGTL(("usm", "%s\n", "Couldn't ensure engine record."));
            error = SNMPERR_USM_GENERICERROR;
            goto err;
        }
    }

    /*
     * Locate the User record.
     * If the user/engine ID is unknown, report this as an error.
     */
    if ((user = usm_get_user_from_list(secEngineID, *secEngineIDLen,
                                       secName, *secNameLen,
                                       userList,
                                       (((sess && sess->isAuthoritative ==
                                          SNMP_SESS_AUTHORITATIVE) ||
                                         (!sess)) ? 0 : 1)))
        == NULL) {
        DEBUGMSGTL(("usm", "Unknown User(%s)\n", secName));
        snmp_increment_statistic(STAT_USMSTATSUNKNOWNUSERNAMES);
        error = SNMPERR_USM_UNKNOWNSECURITYNAME;
        goto err;
    }

    /* ensure the user is active */
    if (user->userStatus != RS_ACTIVE) {
        DEBUGMSGTL(("usm", "Attempt to use an inactive user.\n"));
        error = SNMPERR_USM_UNKNOWNSECURITYNAME;
        goto err;
    }

    /*
     * Make sure the security level is appropriate.
     * usm_check_secLevel() returns 0 = ok, 1 = unsupported level,
     * -1 = inactive user (treated as a generic error here).
     */
    rc = usm_check_secLevel(secLevel, user);
    if (1 == rc) {
        DEBUGMSGTL(("usm", "Unsupported Security Level (%d).\n",
                    secLevel));
        snmp_increment_statistic(STAT_USMSTATSUNSUPPORTEDSECLEVELS);
        error = SNMPERR_USM_UNSUPPORTEDSECURITYLEVEL;
        goto err;
    } else if (rc != 0) {
        DEBUGMSGTL(("usm", "Unknown issue.\n"));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    /*
     * Check the authentication credentials of the message.
     */
    if (secLevel == SNMP_SEC_LEVEL_AUTHNOPRIV
        || secLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
        if (sc_check_keyed_hash(user->authProtocol, user->authProtocolLen,
                                user->authKey, user->authKeyLen,
                                wholeMsg, wholeMsgLen,
                                signature, signature_length)
            != SNMP_ERR_NOERROR) {
            DEBUGMSGTL(("usm", "Verification failed.\n"));
            snmp_increment_statistic(STAT_USMSTATSWRONGDIGESTS);
            snmp_log(LOG_WARNING, "Authentication failed for %s\n",
                     user->name);
            error = SNMPERR_USM_AUTHENTICATIONFAILURE;
            goto err;
        }

        DEBUGMSGTL(("usm", "Verification succeeded.\n"));
    }

    /*
     * Steps 10-11  user is already set - relocated before timeliness
     * check in case it fails - still save user data for response.
     *
     * Cache the keys and protocol oids, per step 11 (s3.2).
     */
    if (usm_set_usmStateReference_auth_protocol(*secStateRef,
                                                user->authProtocol,
                                                user->
                                                authProtocolLen) == -1) {
        DEBUGMSGTL(("usm", "%s\n",
                    "Couldn't cache authentication protocol."));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    if (usm_set_usmStateReference_auth_key(*secStateRef,
                                           user->authKey,
                                           user->authKeyLen) == -1) {
        DEBUGMSGTL(("usm", "%s\n", "Couldn't cache authentication key."));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    if (usm_set_usmStateReference_priv_protocol(*secStateRef,
                                                user->privProtocol,
                                                user->
                                                privProtocolLen) == -1) {
        DEBUGMSGTL(("usm", "%s\n", "Couldn't cache privacy protocol."));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    if (usm_set_usmStateReference_priv_key(*secStateRef,
                                           user->privKey,
                                           user->privKeyLen) == -1) {
        DEBUGMSGTL(("usm", "%s\n", "Couldn't cache privacy key."));
        error = SNMPERR_USM_GENERICERROR;
        goto err;
    }

    /*
     * Perform the timeliness/time manager functions.
     */
    if (secLevel == SNMP_SEC_LEVEL_AUTHNOPRIV
        || secLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
        if (usm_check_and_update_timeliness(secEngineID, *secEngineIDLen,
                                            boots_uint, time_uint,
                                            &error) == -1) {
            goto err;
        }
    }
#ifdef LCD_TIME_SYNC_OPT
    /*
     * Cache the unauthenticated time to use in case we don't have
     * anything better - this guess will be no worse than (0,0)
     * that we normally use.
     */
    else {
        set_enginetime(secEngineID, *secEngineIDLen,
                       boots_uint, time_uint, FALSE);
    }
#endif                          /* LCD_TIME_SYNC_OPT */

    /*
     * If needed, decrypt the scoped PDU.
     */
    if (secLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
        int priv_type = sc_get_privtype(user->privProtocol,
                                        user->privProtocolLen);

        remaining = wholeMsgLen - (data_ptr - wholeMsg);

        if ((value_ptr = asn_parse_sequence(data_ptr, &remaining,
                                            &type_value,
                                            (ASN_UNIVERSAL |
                                             ASN_PRIMITIVE |
                                             ASN_OCTET_STR),
                                            "encrypted sPDU")) == NULL) {
            DEBUGMSGTL(("usm", "%s\n",
                        "Failed while parsing encrypted sPDU."));
            snmp_increment_statistic(STAT_SNMPINASNPARSEERRS);
            usm_free_usmStateReference(*secStateRef);
            *secStateRef = NULL;
            error = SNMPERR_USM_PARSEERROR;
            goto err;
        }

#ifndef NETSNMP_DISABLE_DES
        if (USM_CREATE_USER_PRIV_DES == (priv_type & USM_PRIV_MASK_ALG)) {
            /*
             * From RFC2574:
             *
             * "Before decryption, the encrypted data length is verified.
             * If the length of the OCTET STRING to be decrypted is not
             * an integral multiple of 8 octets, the decryption process
             * is halted and an appropriate exception noted."
             */
            if (remaining % 8 != 0) {
                DEBUGMSGTL(("usm",
                            "Ciphertext is %lu bytes, not an integer multiple of 8 (rem %lu)\n",
                            (unsigned long)remaining,
                            (unsigned long)remaining % 8));
                snmp_increment_statistic(STAT_USMSTATSDECRYPTIONERRORS);
                usm_free_usmStateReference(*secStateRef);
                *secStateRef = NULL;
                error = SNMPERR_USM_DECRYPTIONERROR;
                goto err;
            }

            end_of_overhead = value_ptr;

            if ( !user->privKey ) {
                DEBUGMSGTL(("usm", "No privacy pass phrase for %s\n",
                            user->secName));
                snmp_increment_statistic(STAT_USMSTATSDECRYPTIONERRORS);
                usm_free_usmStateReference(*secStateRef);
                *secStateRef = NULL;
                error = SNMPERR_USM_DECRYPTIONERROR;
                goto err;
            }

            /*
             * XOR the salt with the last (iv_length) bytes
             * of the priv_key to obtain the IV.
             */
            iv_length = BYTESIZE(USM_DES_SALT_LENGTH);
            for (i = 0; i < (int) iv_length; i++)
                iv[i] = salt[i] ^ user->privKey[iv_length + i];
        }
#endif
#ifdef HAVE_AES
        if (USM_CREATE_USER_PRIV_AES == (priv_type & USM_PRIV_MASK_ALG)) {
            /* AES IV = network-order boots (4) + time (4) + salt (8). */
            iv_length = BYTESIZE(USM_AES_SALT_LENGTH);
            net_boots = ntohl(boots_uint);
            net_time = ntohl(time_uint);
            memcpy(iv, &net_boots, 4);
            memcpy(iv+4, &net_time, 4);
            memcpy(iv+8, salt, salt_length);
        }
#endif

#ifdef NETSNMP_ENABLE_TESTING_CODE
        if (debug_is_token_registered("usm/dump") == SNMPERR_SUCCESS) {
            dump_chunk("usm/dump", "Cypher Text", value_ptr, remaining);
            dump_chunk("usm/dump", "salt + Encrypted form:",
                       salt, salt_length);
            dump_chunk("usm/dump", "IV + Encrypted form:", iv, iv_length);
        }
#endif

        if (sc_decrypt(user->privProtocol, user->privProtocolLen,
                       user->privKey, user->privKeyLen,
                       iv, iv_length,
                       value_ptr, remaining,
                       *scopedPdu, scopedPduLen) != SNMP_ERR_NOERROR) {
            DEBUGMSGTL(("usm", "%s\n", "Failed decryption."));
            snmp_increment_statistic(STAT_USMSTATSDECRYPTIONERRORS);
            error = SNMPERR_USM_DECRYPTIONERROR;
            goto err;
        }
#ifdef NETSNMP_ENABLE_TESTING_CODE
        if (debug_is_token_registered("usm/dump") == SNMPERR_SUCCESS) {
            dump_chunk("usm/dump", "Decrypted chunk:",
                       *scopedPdu, *scopedPduLen);
        }
#endif
    }
    /*
     * sPDU is plaintext.
     */
    else {
        *scopedPdu = data_ptr;
        *scopedPduLen = wholeMsgLen - (data_ptr - wholeMsg);
        end_of_overhead = data_ptr;
    }                           /* endif -- PDU decryption */

    /*
     * Calculate the biggest sPDU for the response (i.e., whole - ovrhd).
     *
     * FIX  Correct?
     */
    *maxSizeResponse = maxMsgSize - (end_of_overhead - wholeMsg);

    DEBUGMSGTL(("usm", "USM processing completed.\n"));

    return SNMPERR_SUCCESS;

err:
    /* Single cleanup exit: drop the cached state reference. */
    usm_free_usmStateReference(*secStateRef);
    *secStateRef = NULL;
    netsnmp_assert(error != SNMPERR_SUCCESS);
    return error;
}                               /* end usm_process_in_msg() */

/*
 * Security-module entry point: unpack the incoming-params struct and
 * delegate to usm_process_in_msg().
 */
static int
usm_secmod_process_in_msg(struct snmp_secmod_incoming_params *parms)
{
    if (!parms)
        return SNMPERR_GENERR;

    return usm_process_in_msg(parms->msgProcModel,
                              parms->maxMsgSize,
                              parms->secParams,
                              parms->secModel,
                              parms->secLevel,
                              parms->wholeMsg,
                              parms->wholeMsgLen,
                              parms->secEngineID,
                              parms->secEngineIDLen,
                              parms->secName,
                              parms->secNameLen,
                              parms->scopedPdu,
                              parms->scopedPduLen,
                              parms->maxSizeResponse,
                              parms->secStateRef,
                              parms->sess, parms->msg_flags);
}

/*
 * For reportable USM failures: invoke the session callback on auth
 * failures, then send a Report PDU back when the message requires a
 * confirmed response (or carries the reportable flag).
 */
static void
usm_handle_report(struct session_list *slp,
                  netsnmp_transport *transport, netsnmp_session *session,
                  int result, netsnmp_pdu *pdu)
{
    /*
     * handle reportable errors
     */

    /* this will get in our way */
    usm_free_usmStateReference(pdu->securityStateRef);
    pdu->securityStateRef = NULL;

    switch (result) {
    case SNMPERR_USM_AUTHENTICATIONFAILURE:
    {
        /* Temporarily expose the error to the callback, then restore. */
        int res = session->s_snmp_errno;
        session->s_snmp_errno = result;
        if (session->callback) {
            session->callback(NETSNMP_CALLBACK_OP_RECEIVED_MESSAGE,
                              session, pdu->reqid, pdu,
                              session->callback_magic);
        }
        session->s_snmp_errno = res;
    }
    /* fallthrough */
    case SNMPERR_USM_UNKNOWNENGINEID:
    case SNMPERR_USM_UNKNOWNSECURITYNAME:
    case SNMPERR_USM_UNSUPPORTEDSECURITYLEVEL:
    case SNMPERR_USM_NOTINTIMEWINDOW:
    case SNMPERR_USM_DECRYPTIONERROR:

        if (SNMP_CMD_CONFIRMED(pdu->command) ||
            (pdu->command == 0
             && (pdu->flags & SNMP_MSG_FLAG_RPRT_BIT))) {
            netsnmp_pdu    *pdu2;
            int             flags = pdu->flags;

            pdu->flags |= UCD_MSG_FLAG_FORCE_PDU_COPY;
            pdu2 = snmp_clone_pdu(pdu);
            pdu->flags = pdu2->flags = flags;
            snmpv3_make_report(pdu2, result);
            if (0 == snmp_sess_send(slp, pdu2)) {
                snmp_free_pdu(pdu2);
                /*
                 * TODO: indicate error
                 */
            }
        }
        break;
    }
}

/** utility function to call netsnmp_extend_kul for a usmUser */
/*
 * Extend a user's localized privacy key to the full length the priv
 * algorithm requires (e.g. AES-192/256 keys longer than the hash
 * output).  Returns SNMPERR_SUCCESS or SNMPERR_GENERR.
 */
int
usm_extend_user_kul(struct usmUser *user, u_int privKeyBufSize)
{
    const netsnmp_priv_alg_info *pai;

    DEBUGMSGTL(("usm", "extending key\n"));

    if (NULL == user) {
        DEBUGMSGTL(("usm", "null user!\n"));
        return SNMPERR_GENERR;
    }

    pai = sc_get_priv_alg_byoid(user->privProtocol,
                                user->privProtocolLen);
    if (NULL == pai) {
        DEBUGMSGTL(("usm", "privProtocol lookup failed!\n"));
        return SNMPERR_GENERR;
    }

    return netsnmp_extend_kul(pai->proper_length, user->authProtocol,
                              user->authProtocolLen, pai->type,
                              user->engineID, user->engineIDLen,
                              &user->privKey, &user->privKeyLen,
                              privKeyBufSize);
}

/* sets up initial default session parameters */
/*
 * Copies/derives auth and priv protocol OIDs and keys from in_session
 * (or library defaults / configured master keys or pass phrases) into
 * session.  Returns SNMPERR_SUCCESS, SNMPERR_MALLOC, or SNMP_ERR_GENERR.
 * NOTE(review): the SNMP_ERR_GENERR returns mix a PDU error code into a
 * library-error context — confirm against upstream before changing.
 */
static int
usm_session_init(netsnmp_session *in_session, netsnmp_session *session)
{
    char *cp;
    size_t i;

    if (in_session->securityAuthProtoLen > 0) {
        session->securityAuthProto =
            snmp_duplicate_objid(in_session->securityAuthProto,
                                 in_session->securityAuthProtoLen);
        if (session->securityAuthProto == NULL) {
            in_session->s_snmp_errno = SNMPERR_MALLOC;
            return SNMPERR_MALLOC;
        }
    } else if (get_default_authtype(&i) != NULL) {
        session->securityAuthProto =
            snmp_duplicate_objid(get_default_authtype(NULL), i);
        session->securityAuthProtoLen = i;
    }

    if (in_session->securityPrivProtoLen > 0) {
        session->securityPrivProto =
            snmp_duplicate_objid(in_session->securityPrivProto,
                                 in_session->securityPrivProtoLen);
        if (session->securityPrivProto == NULL) {
            in_session->s_snmp_errno = SNMPERR_MALLOC;
            return SNMPERR_MALLOC;
        }
    } else if (get_default_privtype(&i) != NULL) {
        session->securityPrivProto =
            snmp_duplicate_objid(get_default_privtype(NULL), i);
        session->securityPrivProtoLen = i;
    }

    /* Auth key: prefer a configured hex master key... */
    if ((in_session->securityAuthKeyLen <= 0) &&
        ((cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                     NETSNMP_DS_LIB_AUTHMASTERKEY)))) {
        size_t buflen = sizeof(session->securityAuthKey);
        u_char *tmpp = session->securityAuthKey;
        session->securityAuthKeyLen = 0;
        /* it will be a hex string */
        if (!snmp_hex_to_binary(&tmpp, &buflen,
                                &session->securityAuthKeyLen, 0, cp)) {
            snmp_set_detail("error parsing authentication master key");
            return SNMP_ERR_GENERR;
        }
    /* ...otherwise derive Ku from a pass phrase. */
    } else if ((in_session->securityAuthKeyLen <= 0) &&
               ((cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                            NETSNMP_DS_LIB_AUTHPASSPHRASE)) ||
                (cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                            NETSNMP_DS_LIB_PASSPHRASE)))) {
        session->securityAuthKeyLen = USM_AUTH_KU_LEN;
        if (generate_Ku(session->securityAuthProto,
                        session->securityAuthProtoLen,
                        (u_char *) cp, strlen(cp),
                        session->securityAuthKey,
                        &session->securityAuthKeyLen) != SNMPERR_SUCCESS) {
            snmp_set_detail
                ("Error generating a key (Ku) from the supplied authentication pass phrase.");
            return SNMP_ERR_GENERR;
        }
    }

    /* Priv key: same two-tier scheme (master key, then pass phrase). */
    if ((in_session->securityPrivKeyLen <= 0) &&
        ((cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                     NETSNMP_DS_LIB_PRIVMASTERKEY)))) {
        size_t buflen = sizeof(session->securityPrivKey);
        u_char *tmpp = session->securityPrivKey;
        session->securityPrivKeyLen = 0;
        /* it will be a hex string */
        if (!snmp_hex_to_binary(&tmpp, &buflen,
                                &session->securityPrivKeyLen, 0, cp)) {
            snmp_set_detail("error parsing encryption master key");
            return SNMP_ERR_GENERR;
        }
    } else if ((in_session->securityPrivKeyLen <= 0) &&
               ((cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                            NETSNMP_DS_LIB_PRIVPASSPHRASE)) ||
                (cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                            NETSNMP_DS_LIB_PASSPHRASE)))) {
        session->securityPrivKeyLen = USM_PRIV_KU_LEN;
        /* NOTE(review): priv Ku is derived with the AUTH protocol —
         * this matches RFC 3414 key localization; do not "fix". */
        if (generate_Ku(session->securityAuthProto,
                        session->securityAuthProtoLen,
                        (u_char *) cp, strlen(cp),
                        session->securityPrivKey,
                        &session->securityPrivKeyLen) != SNMPERR_SUCCESS) {
            snmp_set_detail
                ("Error generating a key (Ku) from the supplied privacy pass phrase.");
            return SNMP_ERR_GENERR;
        }
    }

    return SNMPERR_SUCCESS;
}

/*
 * Allocate a new usmUser and populate name/secName/engineID from the
 * session.  On success *result owns the user; on failure everything
 * is freed and SNMPERR_GENERR is returned.
 */
static int
usm_build_user(struct usmUser **result, const netsnmp_session *session)
{
    struct usmUser *user;

    DEBUGMSGTL(("usm", "Building user %s...\n", session->securityName));
    /*
     * user doesn't exist so we create and add it
     */
    user = calloc(1, sizeof(struct usmUser));
    if (user == NULL)
        goto err;

    /*
     * copy in the securityName
     */
    if (session->securityName) {
        user->name = strdup(session->securityName);
        user->secName = strdup(session->securityName);
        if (user->name == NULL || user->secName == NULL)
            goto err;
    }

    /*
     * copy in the engineID
     */
    user->engineID = netsnmp_memdup(session->securityEngineID,
                                    session->securityEngineIDLen);
    if (session->securityEngineID && !user->engineID)
        goto err;
    user->engineIDLen = session->securityEngineIDLen;

    *result = user;
    return SNMPERR_SUCCESS;

err:
    usm_free_user(user);
    return SNMPERR_GENERR;
}

/*
 * usm_create_user_from_session(netsnmp_session *session):
 *
 * creates a user in the usm table from the information in a session.
 * If the user already exists, it is updated with the current
 * information from the session
 *
 * Parameters:
 * session -- IN: pointer to the session to use when creating the user.
 *
 * Returns:
 * SNMPERR_SUCCESS
 * SNMPERR_GENERR
 */
int
usm_create_user_from_session(netsnmp_session * session)
{
    struct usmUser *user;
    int             user_just_created = 0;
    char *cp;

    /*
     * - don't create-another/copy-into user for this session by default
     * - bail now (no error) if we don't have an engineID
     */
    if (SNMP_FLAGS_USER_CREATED ==
        (session->flags & SNMP_FLAGS_USER_CREATED) ||
        session->securityModel != SNMP_SEC_MODEL_USM ||
        session->version != SNMP_VERSION_3 ||
        session->securityNameLen == 0 ||
        session->securityEngineIDLen == 0)
        return SNMPERR_SUCCESS;

    DEBUGMSGTL(("usm", "no flag defined... continuing\n"));
    session->flags |= SNMP_FLAGS_USER_CREATED;

    /*
     * now that we have the engineID, create an entry in the USM list
     * for this user using the information in the session
     */
    user = usm_get_user_from_list(session->securityEngineID,
                                  session->securityEngineIDLen,
                                  session->securityName,
                                  session->securityNameLen,
                                  usm_get_userList(), 0);
    if (NULL != user) {
        DEBUGMSGTL(("usm", "user exists x=%p\n", user));
    } else {
        if (usm_build_user(&user, session) != SNMPERR_SUCCESS)
            return SNMPERR_GENERR;
        user_just_created = 1;
    }

    /*
     * copy the auth protocol
     */
    if (user->authProtocol == NULL && session->securityAuthProto != NULL) {
        SNMP_FREE(user->authProtocol);
        user->authProtocol =
            snmp_duplicate_objid(session->securityAuthProto,
                                 session->securityAuthProtoLen);
        if (user->authProtocol == NULL) {
            usm_free_user(user);
            return SNMPERR_GENERR;
        }
        user->authProtocolLen = session->securityAuthProtoLen;
    }

    /*
     * copy the priv protocol
     */
    if (user->privProtocol == NULL && session->securityPrivProto != NULL) {
        SNMP_FREE(user->privProtocol);
        user->privProtocol =
            snmp_duplicate_objid(session->securityPrivProto,
                                 session->securityPrivProtoLen);
        if (user->privProtocol == NULL) {
            usm_free_user(user);
            return SNMPERR_GENERR;
        }
        user->privProtocolLen = session->securityPrivProtoLen;
    }

    /*
     * copy in the authentication Key.  If not localized, localize it
     */
    if (user->authKey == NULL) {
        if (session->securityAuthLocalKey != NULL
            && session->securityAuthLocalKeyLen != 0) {
            /* already localized key passed in.  use it */
            SNMP_FREE(user->authKey);
            user->authKey = netsnmp_memdup(session->securityAuthLocalKey,
                                           session->securityAuthLocalKeyLen);
            if (!user->authKey) {
                usm_free_user(user);
                return SNMPERR_GENERR;
            }
            user->authKeyLen = session->securityAuthLocalKeyLen;
        } else if (session->securityAuthKeyLen != 0) {
            /* master key (Ku) provided: localize it for this engine */
            SNMP_FREE(user->authKey);
            user->authKey = (u_char *) calloc(1, USM_LENGTH_KU_HASHBLOCK);
            user->authKeyLen = USM_LENGTH_KU_HASHBLOCK;
            if ((user->authKey == NULL) ||
                generate_kul(user->authProtocol, user->authProtocolLen,
                             user->engineID, user->engineIDLen,
                             session->securityAuthKey,
                             session->securityAuthKeyLen,
                             user->authKey, &user->authKeyLen)
                != SNMPERR_SUCCESS) {
                usm_free_user(user);
                return SNMPERR_GENERR;
            }
        } else if ((cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                               NETSNMP_DS_LIB_AUTHLOCALIZEDKEY))) {
            size_t buflen = USM_AUTH_KU_LEN;
            SNMP_FREE(user->authKey);
            user->authKey = (u_char *)malloc(buflen); /* max length needed */
            user->authKeyLen = 0;
            /* it will be a hex string */
            if ((NULL == user->authKey) ||
                !snmp_hex_to_binary(&user->authKey, &buflen,
                                    &user->authKeyLen, 0, cp)) {
                usm_free_user(user);
                return SNMPERR_GENERR;
            }
        }
    }

    /*
     * copy in the privacy Key.  If not localized, localize it
     */
    if (user->privKey == NULL) {
        /** save buffer size in case we need to extend key */
        int keyBufSize = USM_PRIV_KU_LEN;

        DEBUGMSGTL(("usm", "copying privKey\n"));
        if (session->securityPrivLocalKey != NULL
            && session->securityPrivLocalKeyLen != 0) {
            /* already localized key passed in.  use it */
            SNMP_FREE(user->privKey);
            user->privKey = netsnmp_memdup(session->securityPrivLocalKey,
                                           session->securityPrivLocalKeyLen);
            if (!user->privKey) {
                usm_free_user(user);
                return SNMPERR_GENERR;
            }
            keyBufSize = user->privKeyLen =
                session->securityPrivLocalKeyLen;
        } else if (session->securityPrivKeyLen != 0) {
            SNMP_FREE(user->privKey);
            user->privKey = (u_char *) calloc(1, keyBufSize);
            user->privKeyLen = keyBufSize;
            if ((user->privKey == NULL) ||
                generate_kul(user->authProtocol, user->authProtocolLen,
                             user->engineID, user->engineIDLen,
                             session->securityPrivKey,
                             session->securityPrivKeyLen,
                             user->privKey, &user->privKeyLen)
                != SNMPERR_SUCCESS) {
                usm_free_user(user);
                return SNMPERR_GENERR;
            }
        } else if ((cp = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                               NETSNMP_DS_LIB_PRIVLOCALIZEDKEY))) {
            size_t buflen = keyBufSize;
            user->privKey = (u_char *)malloc(buflen); /* max length needed */
            user->privKeyLen = 0;
            /* it will be a hex string */
            if ((NULL == user->privKey) ||
                !snmp_hex_to_binary(&user->privKey, &buflen,
                                    &user->privKeyLen, 0, cp)) {
                usm_free_user(user);
                return SNMPERR_GENERR;
            }
        }

        /* Priv key may need extending past the hash length (AES-256). */
        if (usm_extend_user_kul(user, keyBufSize) != SNMPERR_SUCCESS) {
            usm_free_user(user);
            return SNMPERR_GENERR;
        }
    }

    if (user_just_created) {
        /*
         * add the user into the database
         */
        user->userStatus = RS_ACTIVE;
        user->userStorageType = ST_READONLY;
        usm_add_user(user);
    }
    DEBUGMSGTL(("9:usm", "user created\n"));

    return SNMPERR_SUCCESS;
}

/* A wrapper around the hook */
static int
usm_create_user_from_session_hook(struct session_list *slp,
                                  netsnmp_session *session)
{
    DEBUGMSGTL(("usm",
                "potentially bootstrapping the USM table from session data\n"));
    return usm_create_user_from_session(session);
}

/*
 * Build a noAuthNoPriv GET PDU with an empty securityName, used to
 * probe a remote engine for its engineID.  Also installs a matching
 * empty user in the USM table if one does not exist.  Returns 0 or -1.
 */
static int
usm_build_probe_pdu(netsnmp_pdu **pdu)
{
    struct usmUser *user;

    /*
     * create the pdu
     */
    if (!pdu)
        return -1;
    *pdu = snmp_pdu_create(SNMP_MSG_GET);
    if (!(*pdu))
        return -1;
    (*pdu)->version = SNMP_VERSION_3;
    (*pdu)->securityName = strdup("");
    (*pdu)->securityNameLen = strlen((*pdu)->securityName);
    (*pdu)->securityLevel = SNMP_SEC_LEVEL_NOAUTH;
    (*pdu)->securityModel = SNMP_SEC_MODEL_USM;

    /*
     * create the empty user
     */
    user = usm_get_user2(NULL, 0, (*pdu)->securityName,
                         (*pdu)->securityNameLen);
    if (user == NULL) {
        user = (struct usmUser *) calloc(1, sizeof(struct usmUser));
        if (user == NULL) {
            snmp_free_pdu(*pdu);
            *pdu = (netsnmp_pdu *) NULL;
            return -1;
        }
        user->name = strdup((*pdu)->securityName);
        user->secName = strdup((*pdu)->securityName);
        user->authProtocolLen = OID_LENGTH(usmNoAuthProtocol);
        user->authProtocol =
            snmp_duplicate_objid(usmNoAuthProtocol, user->authProtocolLen);
        user->privProtocolLen = OID_LENGTH(usmNoPrivProtocol);
        user->privProtocol =
            snmp_duplicate_objid(usmNoPrivProtocol, user->privProtocolLen);
        usm_add_user(user);
    }
    return 0;
}

/*
 * Send an engineID probe and harvest the remote engineID from the
 * resulting Report PDU (a Report is the EXPECTED response here).
 * Returns SNMPERR_SUCCESS or SNMP_ERR_GENERR.
 */
static int
usm_discover_engineid(struct session_list *slp, netsnmp_session *session)
{
    netsnmp_pdu    *pdu = NULL, *response = NULL;
    int status, i;

    if (usm_build_probe_pdu(&pdu) != 0) {
        DEBUGMSGTL(("snmp_api", "unable to create probe PDU\n"));
        return SNMP_ERR_GENERR;
    }
    DEBUGMSGTL(("snmp_api", "probing for engineID...\n"));
    session->flags |= SNMP_FLAGS_DONT_PROBE; /* prevent recursion */
    status = snmp_sess_synch_response(slp, pdu, &response);

    if ((response == NULL) && (status == STAT_SUCCESS)) {
        status = STAT_ERROR;
    }

    switch (status) {
    case STAT_SUCCESS:
        /* A normal response to the probe is actually a failure. */
        session->s_snmp_errno = SNMPERR_INVALID_MSG; /* XX?? */
        DEBUGMSGTL(("snmp_sess_open",
                    "error: expected Report as response to probe: %s (%ld)\n",
                    snmp_errstring(response->errstat),
                    response->errstat));
        break;
    case STAT_ERROR:   /* this is what we expected -> Report == STAT_ERROR */
        session->s_snmp_errno = SNMPERR_UNKNOWN_ENG_ID;
        break;
    case STAT_TIMEOUT:
        session->s_snmp_errno = SNMPERR_TIMEOUT;
        break;
    default:
        DEBUGMSGTL(("snmp_sess_open",
                    "unable to connect with remote engine: %s (%d)\n",
                    snmp_api_errstring(session->s_snmp_errno),
                    session->s_snmp_errno));
        break;
    }

    /* The engineID is captured as a side effect of Report processing. */
    if (slp->session->securityEngineIDLen == 0) {
        DEBUGMSGTL(("snmp_api",
                    "unable to determine remote engine ID\n"));
        /* clear the flag so that probe occurs on next inform */
        session->flags &= ~SNMP_FLAGS_DONT_PROBE;
        return SNMP_ERR_GENERR;
    }

    session->s_snmp_errno = SNMPERR_SUCCESS;
    if (snmp_get_do_debugging()) {
        DEBUGMSGTL(("snmp_sess_open", " probe found engineID: "));
        /* NOTE(review): `i` is int vs size_t length — benign here. */
        for (i = 0; i < slp->session->securityEngineIDLen; i++)
            DEBUGMSG(("snmp_sess_open", "%02x",
                      slp->session->securityEngineID[i]));
        DEBUGMSG(("snmp_sess_open", "\n"));
    }

    /*
     * if boot/time supplied set it for this engineID
     */
    if (session->engineBoots || session->engineTime) {
        set_enginetime(session->securityEngineID,
                       session->securityEngineIDLen,
                       session->engineBoots, session->engineTime,
                       TRUE);
    }
    return SNMPERR_SUCCESS;
}

/*
 * Map an algorithm name to its numeric value.
 * NOTE(review): strncasecmp with strlen(str) makes this a PREFIX
 * match — "a" matches the first label starting with 'a'.  Verify
 * against upstream before tightening.
 */
static int
usm_lookup_alg_type(const char *str, const usm_alg_type_t *types)
{
    int i, l;
    l = strlen(str);
    for (i = 0; types[i].label; ++i) {
        if (0 == strncasecmp(types[i].label, str, l))
            return types[i].value;
    }

    return -1;
}

/* Map an algorithm value back to its label, or NULL if unknown. */
static const char *
usm_lookup_alg_str(int value, const usm_alg_type_t *types)
{
    int i;
    for (i = 0; types[i].label; ++i)
        if (value == types[i].value)
            return types[i].label;

    return NULL;
}

int
usm_lookup_auth_type(const char *str)
{
    return usm_lookup_alg_type(str, usm_auth_type );
}

int
usm_lookup_priv_type(const char *str)
{
    return usm_lookup_alg_type(str, usm_priv_type );
}

const char *
usm_lookup_auth_str(int value)
{
    return usm_lookup_alg_str(value, usm_auth_type );
}

const char *
usm_lookup_priv_str(int value)
{
    return usm_lookup_alg_str(value, usm_priv_type );
}

/* Free every entry on the global user list and reset it to empty. */
static void
clear_user_list(void)
{
    struct usmUser *tmp = userList, *next = NULL;

    while (tmp != NULL) {
        next = tmp->next;
        usm_free_user(tmp);
        tmp = next;
    }
    userList = NULL;
}

#ifndef NETSNMP_NO_WRITE_SUPPORT
/*
 * take a given user and clone the security info into another
 */
struct usmUser *
usm_cloneFrom_user(struct usmUser *from, struct usmUser *to)
{
    to->flags = from->flags;

    /*
     * copy the authProtocol oid row pointer
     */
    SNMP_FREE(to->authProtocol);

    if ((to->authProtocol =
         snmp_duplicate_objid(from->authProtocol,
                              from->authProtocolLen)) != NULL)
        to->authProtocolLen = from->authProtocolLen;
    else
        to->authProtocolLen = 0;

    /*
     * copy the authKey
     */
    SNMP_FREE(to->authKey);

    if (from->authKeyLen > 0 &&
        (to->authKey = (u_char *) malloc(from->authKeyLen))
        != NULL) {
        to->authKeyLen = from->authKeyLen;
        memcpy(to->authKey, from->authKey, to->authKeyLen);
    } else {
        to->authKey = NULL;
        to->authKeyLen = 0;
    }

    /*
     * copy the authKeyKu
     */
    SNMP_FREE(to->authKeyKu);

    if (from->authKeyKuLen > 0 &&
        (to->authKeyKu = (u_char *) malloc(from->authKeyKuLen))
        != NULL) {
        to->authKeyKuLen = from->authKeyKuLen;
        memcpy(to->authKeyKu, from->authKeyKu, to->authKeyKuLen);
    } else {
        to->authKeyKu = NULL;
        to->authKeyKuLen = 0;
    }

    /*
     * copy the privProtocol oid row pointer
     */
    SNMP_FREE(to->privProtocol);

    if ((to->privProtocol =
         snmp_duplicate_objid(from->privProtocol,
                              from->privProtocolLen)) != NULL)
        to->privProtocolLen = from->privProtocolLen;
    else
        to->privProtocolLen = 0;

    /*
     * copy the privKey
     */
    SNMP_FREE(to->privKey);

    if (from->privKeyLen > 0 &&
        (to->privKey = (u_char *) malloc(from->privKeyLen))
        != NULL) {
        to->privKeyLen = from->privKeyLen;
        memcpy(to->privKey, from->privKey, to->privKeyLen);
    } else {
        to->privKey = NULL;
        to->privKeyLen = 0;
    }

    /*
     * copy the privKeyKu
     */
    SNMP_FREE(to->privKeyKu);

    if (from->privKeyKuLen > 0 &&
        (to->privKeyKu = (u_char *) malloc(from->privKeyKuLen))
        != NULL) {
        to->privKeyKuLen = from->privKeyKuLen;
        memcpy(to->privKeyKu, from->privKeyKu, to->privKeyKuLen);
    } else {
        to->privKeyKu = NULL;
        to->privKeyKuLen = 0;
    }

    return to;
}
#endif /* NETSNMP_NO_WRITE_SUPPORT */

/*
 * usm_create_user(void):
 * create a default empty user, instantiating only the auth/priv
 * protocols to noAuth and noPriv OID pointers
 */
struct usmUser *
usm_create_user(void)
{
    struct usmUser *newUser;

    /*
     * create the new user
     */
    newUser = (struct usmUser *) calloc(1, sizeof(struct usmUser));
    if (newUser == NULL)
        return NULL;

    /*
     * fill the auth/priv protocols
     */
    if ((newUser->authProtocol =
         snmp_duplicate_objid(usmNoAuthProtocol,
                              OID_LENGTH(usmNoAuthProtocol))) == NULL)
        return usm_free_user(newUser);
    newUser->authProtocolLen = OID_LENGTH(usmNoAuthProtocol);

    if ((newUser->privProtocol =
         snmp_duplicate_objid(usmNoPrivProtocol,
                              OID_LENGTH(usmNoPrivProtocol))) == NULL)
        return usm_free_user(newUser);
    newUser->privProtocolLen = OID_LENGTH(usmNoPrivProtocol);

    /*
     * set the storage type to nonvolatile, and the status to ACTIVE
     */
    newUser->userStorageType = ST_NONVOLATILE;
    newUser->userStatus = RS_ACTIVE;
    return newUser;
}                               /* end usm_clone_user() */

/*
 * usm_create_initial_user(void):
 * creates an initial user, filled with the defaults defined in the
 * USM document.
*/
static struct usmUser *
usm_create_initial_user(const char *name,
                        const oid * authProtocol, size_t authProtocolLen,
                        const oid * privProtocol, size_t privProtocolLen)
{
    struct usmUser *newUser = usm_create_user();
    if (newUser == NULL)
        return NULL;

    if ((newUser->name = strdup(name)) == NULL)
        return usm_free_user(newUser);

    if ((newUser->secName = strdup(name)) == NULL)
        return usm_free_user(newUser);

    if ((newUser->engineID =
         snmpv3_generate_engineID(&newUser->engineIDLen)) == NULL)
        return usm_free_user(newUser);

    /* cloneFrom = { 0, 0 }, i.e. the zeroDotZero OID */
    if ((newUser->cloneFrom = (oid *) malloc(sizeof(oid) * 2)) == NULL)
        return usm_free_user(newUser);
    newUser->cloneFrom[0] = 0;
    newUser->cloneFrom[1] = 0;
    newUser->cloneFromLen = 2;

    /* Replace the noPriv/noAuth defaults set by usm_create_user(). */
    SNMP_FREE(newUser->privProtocol);
    if ((newUser->privProtocol =
         snmp_duplicate_objid(privProtocol, privProtocolLen)) == NULL) {
        return usm_free_user(newUser);
    }
    newUser->privProtocolLen = privProtocolLen;

    SNMP_FREE(newUser->authProtocol);
    if ((newUser->authProtocol =
         snmp_duplicate_objid(authProtocol, authProtocolLen)) == NULL) {
        return usm_free_user(newUser);
    }
    newUser->authProtocolLen = authProtocolLen;

    newUser->userStatus = RS_ACTIVE;
    newUser->userStorageType = ST_READONLY;

    return newUser;
}

/*
 * usm_save_user(): saves a user to the persistent cache
 *
 * Serializes one user as a single whitespace-separated config line:
 * token, status, storage type, engineID, name, secName, cloneFrom,
 * authProtocol, authKey, privProtocol, privKey, userPublicString.
 * NOTE(review): sprintf/read_config_save_* into a fixed 4096-byte
 * buffer with no bounds check — large keys/names could overflow;
 * verify limits against upstream before relying on this.
 */
static void
usm_save_user(struct usmUser *user, const char *token, const char *type)
{
    char line[4096];
    char *cptr;

    memset(line, 0, sizeof(line));

    sprintf(line, "%s %d %d ", token, user->userStatus,
            user->userStorageType);
    cptr = &line[strlen(line)]; /* the NULL */
    cptr = read_config_save_octet_string(cptr, user->engineID,
                                         user->engineIDLen);
    *cptr++ = ' ';
    cptr = read_config_save_octet_string(cptr, (u_char *) user->name,
                                         (user->name == NULL) ? 0 :
                                         strlen(user->name));
    *cptr++ = ' ';
    cptr = read_config_save_octet_string(cptr, (u_char *) user->secName,
                                         (user->secName == NULL) ? 0 :
                                         strlen(user->secName));
    *cptr++ = ' ';
    cptr = read_config_save_objid(cptr, user->cloneFrom,
                                  user->cloneFromLen);
    *cptr++ = ' ';
    cptr = read_config_save_objid(cptr, user->authProtocol,
                                  user->authProtocolLen);
    *cptr++ = ' ';
    cptr = read_config_save_octet_string(cptr, user->authKey,
                                         user->authKeyLen);
    *cptr++ = ' ';
    cptr = read_config_save_objid(cptr, user->privProtocol,
                                  user->privProtocolLen);
    *cptr++ = ' ';
    cptr = read_config_save_octet_string(cptr, user->privKey,
                                         user->privKeyLen);
    *cptr++ = ' ';
    cptr = read_config_save_octet_string(cptr, user->userPublicString,
                                         user->userPublicStringLen);

    read_config_store(type, line);
}

/* Persist every ST_NONVOLATILE user on the given list. */
static void
usm_save_users_from_list(struct usmUser *puserList, const char *token,
                         const char *type)
{
    struct usmUser *uptr;
    for (uptr = puserList; uptr != NULL; uptr = uptr->next) {
        if (uptr->userStorageType == ST_NONVOLATILE)
            usm_save_user(uptr, token, type);
    }
}

/*
 * usm_save_users(): saves a list of users to the persistent cache
 */
static void
usm_save_users(const char *token, const char *type)
{
    usm_save_users_from_list(userList, token, type);
}

/*
 * this is a callback that can store all known users based on a
 * previously registered application ID
 */
static int
usm_store_users(int majorID, int minorID, void *serverarg, void *clientarg)
{
    /*
     * figure out our application name
     */
    char           *appname = (char *) clientarg;
    if (appname == NULL) {
        appname = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID,
                                        NETSNMP_DS_LIB_APPTYPE);
    }

    /*
     * save the user base
     */
    usm_save_users("usmUser", appname);

    /*
     * never fails
     */
    return SNMPERR_SUCCESS;
}

/*
 * usm_parse_user(): reads in a line containing a saved user profile
 * and returns a pointer to a newly created struct usmUser.
*/ static struct usmUser * usm_read_user(const char *line) { struct usmUser *user; size_t len, proper_length, privtype; user = usm_create_user(); if (user == NULL) return NULL; user->userStatus = atoi(line); line = skip_token_const(line); user->userStorageType = atoi(line); line = skip_token_const(line); line = read_config_read_octet_string_const(line, &user->engineID, &user->engineIDLen); /* * set the lcd entry for this engineID to the minimum boots/time * values so that its a known engineid and won't return a report pdu. * This is mostly important when receiving v3 traps so that the usm * will at least continue processing them. */ set_enginetime(user->engineID, user->engineIDLen, 1, 0, 0); line = read_config_read_octet_string(line, (u_char **) & user->name, &len); line = read_config_read_octet_string(line, (u_char **) & user->secName, &len); SNMP_FREE(user->cloneFrom); user->cloneFromLen = 0; line = read_config_read_objid_const(line, &user->cloneFrom, &user->cloneFromLen); SNMP_FREE(user->authProtocol); user->authProtocolLen = 0; line = read_config_read_objid_const(line, &user->authProtocol, &user->authProtocolLen); line = read_config_read_octet_string_const(line, &user->authKey, &user->authKeyLen); SNMP_FREE(user->privProtocol); user->privProtocolLen = 0; line = read_config_read_objid_const(line, &user->privProtocol, &user->privProtocolLen); line = read_config_read_octet_string(line, &user->privKey, &user->privKeyLen); privtype = sc_get_privtype(user->privProtocol, user->privProtocolLen); proper_length = sc_get_proper_priv_length_bytype(privtype); if (USM_CREATE_USER_PRIV_DES == privtype) proper_length *= 2; /* ?? 
we store salt with key */ /* For backwards compatibility */ if (user->privKeyLen > proper_length) { user->privKeyLen = proper_length; } line = read_config_read_octet_string(line, &user->userPublicString, &user->userPublicStringLen); return user; } /* * snmpd.conf parsing routines */ void usm_parse_config_usmUser(const char *token, char *line) { struct usmUser *uptr; uptr = usm_read_user(line); if ( uptr) usm_add_user(uptr); } /*******************************************************************-o-****** * usm_set_password * * Parameters: * *token * *line * * * format: userSetAuthPass secname engineIDLen engineID pass * or: userSetPrivPass secname engineIDLen engineID pass * or: userSetAuthKey secname engineIDLen engineID KuLen Ku * or: userSetPrivKey secname engineIDLen engineID KuLen Ku * or: userSetAuthLocalKey secname engineIDLen engineID KulLen Kul * or: userSetPrivLocalKey secname engineIDLen engineID KulLen Kul * * type is: 1=passphrase; 2=Ku; 3=Kul. * * * ASSUMES Passwords are null-terminated printable strings. 
*/ static void usm_set_password(const char *token, char *line) { char *cp; char nameBuf[SNMP_MAXBUF]; u_char *engineID = NULL; size_t engineIDLen = 0; struct usmUser *user; cp = copy_nword(line, nameBuf, sizeof(nameBuf)); if (cp == NULL) { config_perror("invalid name specifier"); return; } DEBUGMSGTL(("usm", "comparing: %s and %s\n", cp, WILDCARDSTRING)); if (strncmp(cp, WILDCARDSTRING, strlen(WILDCARDSTRING)) == 0) { /* * match against all engineIDs we know about */ cp = skip_token(cp); for (user = userList; user != NULL; user = user->next) { if (user->secName && strcmp(user->secName, nameBuf) == 0) { usm_set_user_password(user, token, cp); } } } else { cp = read_config_read_octet_string(cp, &engineID, &engineIDLen); if (cp == NULL) { config_perror("invalid engineID specifier"); SNMP_FREE(engineID); return; } user = usm_get_user(engineID, engineIDLen, nameBuf); if (user == NULL) { config_perror("not a valid user/engineID pair"); SNMP_FREE(engineID); return; } usm_set_user_password(user, token, cp); SNMP_FREE(engineID); } } /* * uses the rest of LINE to configure USER's password of type TOKEN */ void usm_set_user_password(struct usmUser *user, const char *token, char *line) { char *cp = line; u_char *engineID = user->engineID; size_t engineIDLen = user->engineIDLen; u_char **key; size_t *keyLen; u_char userKey[SNMP_MAXBUF_SMALL]; size_t userKeyLen = SNMP_MAXBUF_SMALL; u_char *userKeyP = userKey; int type, ret; /* * Retrieve the "old" key and set the key type. 
*/ if (!token) { return; } else if (strcmp(token, "userSetAuthPass") == 0) { key = &user->authKey; keyLen = &user->authKeyLen; type = 0; } else if (strcmp(token, "userSetPrivPass") == 0) { key = &user->privKey; keyLen = &user->privKeyLen; type = 0; } else if (strcmp(token, "userSetAuthKey") == 0) { key = &user->authKey; keyLen = &user->authKeyLen; type = 1; } else if (strcmp(token, "userSetPrivKey") == 0) { key = &user->privKey; keyLen = &user->privKeyLen; type = 1; } else if (strcmp(token, "userSetAuthLocalKey") == 0) { key = &user->authKey; keyLen = &user->authKeyLen; type = 2; } else if (strcmp(token, "userSetPrivLocalKey") == 0) { key = &user->privKey; keyLen = &user->privKeyLen; type = 2; } else { /* * no old key, or token was not recognized */ return; } if (*key) { /* * (destroy and) free the old key */ memset(*key, 0, *keyLen); SNMP_FREE(*key); } if (type == 0) { /* * convert the password into a key */ if (cp == NULL) { config_perror("missing user password"); return; } ret = generate_Ku(user->authProtocol, user->authProtocolLen, (u_char *) cp, strlen(cp), userKey, &userKeyLen); if (ret != SNMPERR_SUCCESS) { config_perror("setting key failed (in sc_genKu())"); return; } /* save master key */ if (user->flags & USMUSER_FLAG_KEEP_MASTER_KEY) { if (userKey == user->privKey) { user->privKeyKu = netsnmp_memdup(userKey, userKeyLen); user->privKeyKuLen = userKeyLen; } else if (userKey == user->authKey) { user->authKeyKu = netsnmp_memdup(userKey, userKeyLen); user->authKeyKuLen = userKeyLen; } } } else if (type == 1) { cp = read_config_read_octet_string(cp, &userKeyP, &userKeyLen); if (cp == NULL) { config_perror("invalid user key"); return; } } if (type < 2) { *key = (u_char *) malloc(SNMP_MAXBUF_SMALL); *keyLen = SNMP_MAXBUF_SMALL; ret = generate_kul(user->authProtocol, user->authProtocolLen, engineID, engineIDLen, userKey, userKeyLen, *key, keyLen); if (ret != SNMPERR_SUCCESS) { config_perror("setting key failed (in generate_kul())"); return; } /* * (destroy and) 
free the old key */ memset(userKey, 0, sizeof(userKey)); } else { /* * the key is given, copy it in */ cp = read_config_read_octet_string(cp, key, keyLen); if (cp == NULL) { config_perror("invalid localized user key"); return; } } if (key == &user->privKey) { ret = usm_extend_user_kul(user, *keyLen); if (SNMPERR_SUCCESS != ret) { config_perror("error extending localized user key"); return; } } } /* end usm_set_password() */ /* * create a usm user from a string. * * The format for the string is described in the createUser * secion of the snmpd.conf man page. * * On success, a pointer to the created usmUser struct is returned. * On error, a NULL pointer is returned. In this case, if a pointer to a * char pointer is provided in errorMsg, an error string is returned. * This error string points to a static message, and should not be * freed. */ static struct usmUser * usm_create_usmUser_from_string(char *line, const char **errorMsg) { char *cp; const char *dummy; char buf[SNMP_MAXBUF_MEDIUM]; struct usmUser *newuser; u_char userKey[SNMP_MAXBUF_SMALL], *tmpp; size_t userKeyLen = SNMP_MAXBUF_SMALL; size_t privKeySize; size_t ret; int ret2, properLen, properPrivKeyLen; const oid *def_auth_prot, *def_priv_prot; size_t def_auth_prot_len, def_priv_prot_len; const netsnmp_priv_alg_info *pai; def_auth_prot = get_default_authtype(&def_auth_prot_len); def_priv_prot = get_default_privtype(&def_priv_prot_len); if (NULL == line) return NULL; #ifdef NETSNMP_ENABLE_TESTING_CODE DEBUGMSGTL(("usmUser", "new user %s\n", line)); /* logs passphrases */ #endif if (NULL == errorMsg) errorMsg = &dummy; *errorMsg = NULL; /* no errors yet */ newuser = usm_create_user(); if (newuser == NULL) { *errorMsg = "malloc failure creating new user"; goto fail; } /* * READ: Security Name */ cp = copy_nword(line, buf, sizeof(buf)); /* * check for (undocumented) 'keep master key' flag. so far, this is * just used for users for informs (who need non-localized keys). 
*/ if (strcmp(buf, "-M") == 0) { newuser->flags |= USMUSER_FLAG_KEEP_MASTER_KEY; cp = copy_nword(cp, buf, sizeof(buf)); } /* * might be a -e ENGINEID argument */ if (strcmp(buf, "-e") == 0) { size_t ebuf_len = 32, eout_len = 0; u_char *ebuf = (u_char *) malloc(ebuf_len); if (ebuf == NULL) { *errorMsg = "malloc failure processing -e flag"; goto fail; } /* * Get the specified engineid from the line. */ cp = copy_nword(cp, buf, sizeof(buf)); if (!snmp_hex_to_binary(&ebuf, &ebuf_len, &eout_len, 1, buf)) { *errorMsg = "invalid EngineID argument to -e"; SNMP_FREE(ebuf); goto fail; } newuser->engineID = ebuf; newuser->engineIDLen = eout_len; cp = copy_nword(cp, buf, sizeof(buf)); } else { newuser->engineID = snmpv3_generate_engineID(&ret); if (ret == 0) { goto fail; } newuser->engineIDLen = ret; } newuser->secName = strdup(buf); newuser->name = strdup(buf); if (!cp) { #ifdef NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV /** no passwords ok iff defaults are noauth/nopriv */ if (snmp_oid_compare(usmNoAuthProtocol, OID_LENGTH(usmNoAuthProtocol), def_auth_prot, def_auth_prot_len) != 0) { *errorMsg = "no authentication pass phrase"; goto fail; } if (snmp_oid_compare(usmNoPrivProtocol, OID_LENGTH(usmNoPrivProtocol), def_priv_prot, def_priv_prot_len) != 0) { *errorMsg = "no privacy pass phrase"; goto fail; } #endif /* NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV */ goto add; /* no authentication or privacy type */ } /* * READ: Authentication Type */ newuser->authProtocol[0] = 0; cp = copy_nword(cp, buf, sizeof(buf)); if ((strncmp(cp, "default", 7) == 0) && (NULL != def_auth_prot)) { SNMP_FREE(newuser->authProtocol); newuser->authProtocol = snmp_duplicate_objid(def_auth_prot, def_auth_prot_len); if (newuser->authProtocol == NULL) { *errorMsg = "malloc failed"; goto fail; } newuser->authProtocolLen = def_auth_prot_len; } else { const oid *auth_prot; int auth_type = usm_lookup_auth_type(buf); if (auth_type < 0) { *errorMsg = "unknown authProtocol"; goto fail; } auth_prot = sc_get_auth_oid(auth_type, 
&newuser->authProtocolLen); if (auth_prot) { SNMP_FREE(newuser->authProtocol); newuser->authProtocol = snmp_duplicate_objid(auth_prot, newuser->authProtocolLen); } if (newuser->authProtocol == NULL) { *errorMsg = "malloc failed"; goto fail; } } if (0 == newuser->authProtocol[0]) { *errorMsg = "Unknown authentication protocol"; goto fail; } #ifdef NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV if (snmp_oid_compare(newuser->authProtocol, newuser->authProtocolLen, def_auth_prot, def_auth_prot_len) != 0) { *errorMsg = "auth protocol does not match system policy"; goto fail; } #endif /* NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV */ /* * READ: Authentication Pass Phrase or key */ cp = copy_nword(cp, buf, sizeof(buf)); if (strcmp(buf,"-m") == 0) { /* a master key is specified */ cp = copy_nword(cp, buf, sizeof(buf)); ret = sizeof(userKey); tmpp = userKey; userKeyLen = 0; if (!snmp_hex_to_binary(&tmpp, &ret, &userKeyLen, 0, buf)) { *errorMsg = "invalid key value argument to -m"; goto fail; } /* save master key */ if (newuser->flags & USMUSER_FLAG_KEEP_MASTER_KEY) { newuser->authKeyKu = netsnmp_memdup(userKey, userKeyLen); newuser->authKeyKuLen = userKeyLen; } } else if (strcmp(buf,"-l") != 0) { /* a password is specified */ userKeyLen = sizeof(userKey); ret2 = generate_Ku(newuser->authProtocol, newuser->authProtocolLen, (u_char *) buf, strlen(buf), userKey, &userKeyLen); if (ret2 != SNMPERR_SUCCESS) { *errorMsg = "could not generate the authentication key from the supplied pass phrase."; goto fail; } /* save master key */ if (newuser->flags & USMUSER_FLAG_KEEP_MASTER_KEY) { newuser->authKeyKu = netsnmp_memdup(userKey, userKeyLen); newuser->authKeyKuLen = userKeyLen; } } /* * And turn it into a localized key */ properLen = sc_get_proper_auth_length_bytype( sc_get_authtype(newuser->authProtocol, newuser->authProtocolLen)); if (properLen <= 0) { *errorMsg = "Could not get proper authentication protocol key length"; goto fail; } newuser->authKey = (u_char *) malloc(properLen); newuser->authKeyLen = 
properLen; if (strcmp(buf,"-l") == 0) { /* a local key is directly specified */ cp = copy_nword(cp, buf, sizeof(buf)); ret = newuser->authKeyLen; newuser->authKeyLen = 0; if (!snmp_hex_to_binary(&newuser->authKey, &ret, &newuser->authKeyLen, 0, buf)) { *errorMsg = "invalid key value argument to -l"; goto fail; } if (properLen != newuser->authKeyLen) { *errorMsg = "improper key length to -l"; goto fail; } } else { ret2 = generate_kul(newuser->authProtocol, newuser->authProtocolLen, newuser->engineID, newuser->engineIDLen, userKey, userKeyLen, newuser->authKey, &newuser->authKeyLen); if (ret2 != SNMPERR_SUCCESS) { *errorMsg = "could not generate localized authentication key (Kul) from the master key (Ku)."; goto fail; } } if (!cp) { #ifndef NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV goto add; /* no privacy type (which is legal) */ #else if (snmp_oid_compare(usmNoPrivProtocol, OID_LENGTH(usmNoPrivProtocol), def_priv_prot, def_priv_prot_len) == 0) goto add; else { *errorMsg = "priv protocol does not match system policy"; goto fail; } #endif /* NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV */ } /* * READ: Privacy Type */ newuser->privProtocol[0] = 0; cp = copy_nword(cp, buf, sizeof(buf)); if ((strncmp(buf, "default", 7) == 0) && (NULL != def_priv_prot)) { SNMP_FREE(newuser->privProtocol); newuser->privProtocol = snmp_duplicate_objid(def_priv_prot, def_priv_prot_len); if (newuser->privProtocol == NULL) { *errorMsg = "malloc failed"; goto fail; } newuser->privProtocolLen = def_priv_prot_len; pai = sc_get_priv_alg_byoid(newuser->privProtocol, newuser->privProtocolLen); } else { int priv_type = usm_lookup_priv_type(buf); if (priv_type < 0) { *errorMsg = "unknown privProtocol"; DEBUGMSGTL(("usmUser", "%s %s\n", *errorMsg, buf)); goto fail; } DEBUGMSGTL(("9:usmUser", "privProtocol %s\n", buf)); pai = sc_get_priv_alg_bytype(priv_type); if (pai) { SNMP_FREE(newuser->privProtocol); newuser->privProtocolLen = pai->oid_len; newuser->privProtocol = snmp_duplicate_objid(pai->alg_oid, 
newuser->privProtocolLen); DEBUGMSGTL(("9:usmUser", "pai %s\n", pai->name)); if (newuser->privProtocol == NULL) { *errorMsg = "malloc failed"; goto fail; } } } if (NULL == pai) { *errorMsg = "priv protocol lookup failed"; goto fail; } if (0 == newuser->privProtocol[0] && NULL == *errorMsg) *errorMsg = "Unknown privacy protocol"; if (NULL != *errorMsg) goto fail; #ifdef NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV if (snmp_oid_compare(newuser->privProtocol, newuser->privProtocolLen, def_priv_prot, def_priv_prot_len) != 0) { *errorMsg = "priv protocol does not match system policy"; goto fail; } #endif /* NETSNMP_FORCE_SYSTEM_V3_AUTHPRIV */ properPrivKeyLen = pai->proper_length; if (USM_CREATE_USER_PRIV_DES == pai->type) properPrivKeyLen *= 2; /* ?? we store salt with key */ /* * READ: Encryption Pass Phrase or key */ if (!cp) { /* * assume the same as the authentication key */ newuser->privKey = netsnmp_memdup(newuser->authKey, newuser->authKeyLen); privKeySize = newuser->privKeyLen = newuser->authKeyLen; if (newuser->flags & USMUSER_FLAG_KEEP_MASTER_KEY) { newuser->privKeyKu = netsnmp_memdup(newuser->authKeyKu, newuser->authKeyKuLen); newuser->privKeyKuLen = newuser->authKeyKuLen; } } else { cp = copy_nword(cp, buf, sizeof(buf)); if (strcmp(buf,"-m") == 0) { /* a master key is specified */ cp = copy_nword(cp, buf, sizeof(buf)); ret = sizeof(userKey); tmpp = userKey; userKeyLen = 0; if (!snmp_hex_to_binary(&tmpp, &ret, &userKeyLen, 0, buf)) { *errorMsg = "invalid key value argument to -m"; goto fail; } /* save master key */ if (newuser->flags & USMUSER_FLAG_KEEP_MASTER_KEY) { newuser->privKeyKu = netsnmp_memdup(userKey, userKeyLen); newuser->privKeyKuLen = userKeyLen; } } else if (strcmp(buf,"-l") != 0) { /* a password is specified */ userKeyLen = sizeof(userKey); ret2 = generate_Ku(newuser->authProtocol, newuser->authProtocolLen, (u_char*)buf, strlen(buf), userKey, &userKeyLen); if (ret2 != SNMPERR_SUCCESS) { *errorMsg = "could not generate the privacy key from the supplied 
pass phrase."; goto fail; } /* save master key */ if (newuser->flags & USMUSER_FLAG_KEEP_MASTER_KEY) { newuser->privKeyKu = netsnmp_memdup(userKey, userKeyLen); newuser->privKeyKuLen = userKeyLen; } } /* * And turn it into a localized key * Allocate enough space for greater of auth mac and privKey len. */ privKeySize = SNMP_MAX(properPrivKeyLen, properLen); newuser->privKey = (u_char *) malloc(privKeySize); newuser->privKeyLen = privKeySize; if (strcmp(buf,"-l") == 0) { /* a local key is directly specified */ cp = copy_nword(cp, buf, sizeof(buf)); ret = newuser->privKeyLen; newuser->privKeyLen = 0; if (!snmp_hex_to_binary(&newuser->privKey, &ret, &newuser->privKeyLen, 0, buf)) { *errorMsg = "invalid key value argument to -l"; goto fail; } } else { ret2 = generate_kul(newuser->authProtocol, newuser->authProtocolLen, newuser->engineID, newuser->engineIDLen, userKey, userKeyLen, newuser->privKey, &newuser->privKeyLen); if (ret2 != SNMPERR_SUCCESS) { *errorMsg = "could not generate localized privacy key (Kul) from the master key (Ku)."; goto fail; } } if (newuser->privKeyLen < properPrivKeyLen) { ret = usm_extend_user_kul(newuser, properPrivKeyLen); if (ret != SNMPERR_SUCCESS) { *errorMsg = "could not extend localized privacy key to required length."; goto fail; } } } if ((newuser->privKeyLen >= properPrivKeyLen) || (properPrivKeyLen == 0)){ DEBUGMSGTL(("9:usmUser", "truncating privKeyLen from %" NETSNMP_PRIz "d to %d\n", newuser->privKeyLen, properPrivKeyLen)); newuser->privKeyLen = properPrivKeyLen; } else { DEBUGMSGTL(("usmUser", "privKey length %" NETSNMP_PRIz "d < %d required by privProtocol\n", newuser->privKeyLen, properPrivKeyLen)); *errorMsg = "privKey length is less than required by privProtocol"; goto fail; } add: usm_add_user(newuser); DEBUGMSGTL(("usmUser", "created a new user %s at ", newuser->secName)); DEBUGMSGHEX(("usmUser", newuser->engineID, newuser->engineIDLen)); DEBUGMSG(("usmUser", "\n")); return newuser; fail: usm_free_user(newuser); return 
NULL; } void usm_parse_create_usmUser(const char *token, char *line) { const char *error = NULL; usm_create_usmUser_from_string(line, &error); if (error) config_perror(error); } static void snmpv3_authtype_conf(const char *word, char *cptr) { int auth_type = usm_lookup_auth_type(cptr); if (auth_type < 0) config_perror("Unknown authentication type"); defaultAuthType = sc_get_auth_oid(auth_type, &defaultAuthTypeLen); DEBUGMSGTL(("snmpv3", "set default authentication type: %s\n", cptr)); } const oid * get_default_authtype(size_t * len) { if (defaultAuthType == NULL) { defaultAuthType = SNMP_DEFAULT_AUTH_PROTO; defaultAuthTypeLen = SNMP_DEFAULT_AUTH_PROTOLEN; } if (len) *len = defaultAuthTypeLen; return defaultAuthType; } static void snmpv3_privtype_conf(const char *word, char *cptr) { int priv_type = usm_lookup_priv_type(cptr); if (priv_type < 0) config_perror("Unknown privacy type"); defaultPrivType = sc_get_priv_oid(priv_type, &defaultPrivTypeLen); DEBUGMSGTL(("snmpv3", "set default privacy type: %s\n", cptr)); } const oid * get_default_privtype(size_t * len) { if (defaultPrivType == NULL) { defaultPrivType = SNMP_DEFAULT_PRIV_PROTO; defaultPrivTypeLen = SNMP_DEFAULT_PRIV_PROTOLEN; } if (len) *len = defaultPrivTypeLen; return defaultPrivType; } void init_usm_conf(const char *app) { register_config_handler(app, "usmUser", usm_parse_config_usmUser, NULL, NULL); register_config_handler(app, "createUser", usm_parse_create_usmUser, NULL, "username [-e ENGINEID] (MD5|SHA|SHA-512|SHA-384|SHA-256|SHA-224|default) authpassphrase [(DES|AES|default) [privpassphrase]]"); /* * we need to be called back later */ snmp_register_callback(SNMP_CALLBACK_LIBRARY, SNMP_CALLBACK_STORE_DATA, usm_store_users, NULL); } /* * initializations for the USM. * * Should be called after the (engineid) configuration files have been read. * * Set "arbitrary" portion of salt to a random number. 
*/ static int init_usm_post_config(int majorid, int minorid, void *serverarg, void *clientarg) { size_t salt_integer_len = sizeof(salt_integer); if (sc_random((u_char *) & salt_integer, &salt_integer_len) != SNMPERR_SUCCESS) { DEBUGMSGTL(("usm", "sc_random() failed: using time() as salt.\n")); salt_integer = (u_int) time(NULL); } #ifdef HAVE_AES salt_integer_len = sizeof (salt_integer64_1); if (sc_random((u_char *) & salt_integer64_1, &salt_integer_len) != SNMPERR_SUCCESS) { DEBUGMSGTL(("usm", "sc_random() failed: using time() as aes1 salt.\n")); salt_integer64_1 = (u_int) time(NULL); } salt_integer_len = sizeof (salt_integer64_1); if (sc_random((u_char *) & salt_integer64_2, &salt_integer_len) != SNMPERR_SUCCESS) { DEBUGMSGTL(("usm", "sc_random() failed: using time() as aes2 salt.\n")); salt_integer64_2 = (u_int) time(NULL); } #endif #ifndef NETSNMP_DISABLE_MD5 noNameUser = usm_create_initial_user("", usmHMACMD5AuthProtocol, OID_LENGTH(usmHMACMD5AuthProtocol), SNMP_DEFAULT_PRIV_PROTO, SNMP_DEFAULT_PRIV_PROTOLEN); #else noNameUser = usm_create_initial_user("", usmHMACSHA1AuthProtocol, OID_LENGTH(usmHMACSHA1AuthProtocol), SNMP_DEFAULT_PRIV_PROTO, SNMP_DEFAULT_PRIV_PROTOLEN); #endif if ( noNameUser ) { SNMP_FREE(noNameUser->engineID); noNameUser->engineIDLen = 0; } return SNMPERR_SUCCESS; } /* end init_usm_post_config() */ static int deinit_usm_post_config(int majorid, int minorid, void *serverarg, void *clientarg) { if (usm_free_user(noNameUser) != NULL) { DEBUGMSGTL(("deinit_usm_post_config", "could not free initial user\n")); return SNMPERR_GENERR; } noNameUser = NULL; DEBUGMSGTL(("deinit_usm_post_config", "initial user removed\n")); return SNMPERR_SUCCESS; } /* end deinit_usm_post_config() */ void init_usm(void) { struct snmp_secmod_def *def; char *type; DEBUGMSGTL(("init_usm", "unit_usm: %" NETSNMP_PRIo "u %" NETSNMP_PRIo "u\n", usmNoPrivProtocol[0], usmNoPrivProtocol[1])); sc_init(); /* initalize scapi code */ /* * register ourselves as a security service */ 
def = SNMP_MALLOC_STRUCT(snmp_secmod_def); if (def == NULL) return; /* * XXX: def->init_sess_secmod move stuff from snmp_api.c */ def->encode_reverse = usm_secmod_rgenerate_out_msg; def->encode_forward = usm_secmod_generate_out_msg; def->decode = usm_secmod_process_in_msg; def->pdu_clone = usm_clone; def->pdu_free_state_ref = usm_free_usmStateReference; def->session_setup = usm_session_init; def->handle_report = usm_handle_report; def->probe_engineid = usm_discover_engineid; def->post_probe_engineid = usm_create_user_from_session_hook; if (register_sec_mod(USM_SEC_MODEL_NUMBER, "usm", def) != SNMPERR_SUCCESS) { SNMP_FREE(def); snmp_log(LOG_ERR, "could not register usm sec mod\n"); return; } snmp_register_callback(SNMP_CALLBACK_LIBRARY, SNMP_CALLBACK_POST_PREMIB_READ_CONFIG, init_usm_post_config, NULL); snmp_register_callback(SNMP_CALLBACK_LIBRARY, SNMP_CALLBACK_SHUTDOWN, deinit_usm_post_config, NULL); snmp_register_callback(SNMP_CALLBACK_LIBRARY, SNMP_CALLBACK_SHUTDOWN, free_engineID, NULL); register_config_handler("snmp", "defAuthType", snmpv3_authtype_conf, NULL, "MD5|SHA|SHA-512|SHA-384|SHA-256|SHA-224"); register_config_handler("snmp", "defPrivType", snmpv3_privtype_conf, NULL, "DES" #ifdef HAVE_AES "|AES|AES-128" #ifdef NETSNMP_DRAFT_BLUMENTHAL_AES_04 "|AES-192|AES-256" #endif /* NETSNMP_DRAFT_BLUMENTHAL_AES_04 */ #else " (AES support not available)" #endif ); /* * Free stuff at shutdown time */ snmp_register_callback(SNMP_CALLBACK_LIBRARY, SNMP_CALLBACK_SHUTDOWN, free_enginetime_on_shutdown, NULL); type = netsnmp_ds_get_string(NETSNMP_DS_LIBRARY_ID, NETSNMP_DS_LIB_APPTYPE); register_config_handler(type, "userSetAuthPass", usm_set_password, NULL, NULL); register_config_handler(type, "userSetPrivPass", usm_set_password, NULL, NULL); register_config_handler(type, "userSetAuthKey", usm_set_password, NULL, NULL); register_config_handler(type, "userSetPrivKey", usm_set_password, NULL, NULL); register_config_handler(type, "userSetAuthLocalKey", usm_set_password, 
NULL, NULL); register_config_handler(type, "userSetPrivLocalKey", usm_set_password, NULL, NULL); } void shutdown_usm(void) { free_etimelist(); clear_user_list(); }
795370.c
// Classification: #default/p/DEC/CG/aS+dS/v/fpb/ln
// Written by: Igor Eremeev
// Reviewed by: Sergey Pomelov
// Comment:
#include <stdio.h>

/* Print the given value in decimal; always reports success (0). */
int func (int a)
{
    printf ("%d", a);
    return 0;
}

/* Store the constant 1 through the supplied pointer. */
void f(int *p)
{
    *p = 1;
}

int main(void)
{
    int value = 1;

    f(&value);
    if (value != 0) {
        func (value);
    }
    return 0;
}
85614.c
/**
 * Copyright (c) 2015 - present LibDriver All rights reserved
 *
 * The MIT License (MIT)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * @file      driver_fm24clxx.c
 * @brief     driver fm24clxx source file
 * @version   1.0.0
 * @author    Shifeng Li
 * @date      2021-06-15
 *
 * <h3>history</h3>
 * <table>
 * <tr><th>Date        <th>Version  <th>Author      <th>Description
 * <tr><td>2021/06/15  <td>1.0      <td>Shifeng Li  <td>first upload
 * </table>
 */

#include "driver_fm24clxx.h"

/**
 * @brief chip information definition
 */
#define CHIP_NAME                 "Cypress FM24CLXX"        /**< chip name */
#define MANUFACTURER_NAME         "Cypress"                 /**< manufacturer name */
#define SUPPLY_VOLTAGE_MIN        2.7f                      /**< chip min supply voltage */
#define SUPPLY_VOLTAGE_MAX        3.65f                     /**< chip max supply voltage */
#define MAX_CURRENT               0.1f                      /**< chip max current */
#define TEMPERATURE_MIN           -40.0f                    /**< chip min operating temperature */
#define TEMPERATURE_MAX           85.0f                     /**< chip max operating temperature */
#define DRIVER_VERSION            1000                      /**< driver version */

/**
 * @brief     initialize the chip
 * @param[in] *handle points to a fm24clxx handle structure
 * @return    status code
 *            - 0 success
 *            - 1 iic initialization failed
 *            - 2 handle is NULL
 *            - 3 linked functions is NULL
 * @note      none
 */
uint8_t fm24clxx_init(fm24clxx_handle_t *handle)
{
    if (handle == NULL)                                                       /* check handle */
    {
        return 2;                                                             /* return error */
    }
    if (handle->debug_print == NULL)                                          /* check debug_print */
    {
        return 3;                                                             /* return error */
    }
    if (handle->iic_init == NULL)                                             /* check iic_init */
    {
        handle->debug_print("fm24clxx: iic_init is null.\n");                 /* iic_init is null */

        return 3;                                                             /* return error */
    }
    if (handle->iic_deinit == NULL)                                           /* check iic_deinit */
    {
        handle->debug_print("fm24clxx: iic_deinit is null.\n");               /* iic_deinit is null */

        return 3;                                                             /* return error */
    }
    if (handle->iic_read == NULL)                                             /* check iic_read */
    {
        handle->debug_print("fm24clxx: iic_read is null.\n");                 /* iic_read is null */

        return 3;                                                             /* return error */
    }
    if (handle->iic_write == NULL)                                            /* check iic_write */
    {
        handle->debug_print("fm24clxx: iic_write is null.\n");                /* iic_write is null */

        return 3;                                                             /* return error */
    }
    if (handle->iic_read_address16 == NULL)                                   /* check iic_read_address16 */
    {
        handle->debug_print("fm24clxx: iic_read_address16 is null.\n");       /* iic_read_address16 is null */

        return 3;                                                             /* return error */
    }
    if (handle->iic_write_address16 == NULL)                                  /* check iic_write_address16 */
    {
        handle->debug_print("fm24clxx: iic_write_address16 is null.\n");      /* iic_write_address16 is null */

        return 3;                                                             /* return error */
    }
    if (handle->delay_ms == NULL)                                             /* check delay_ms */
    {
        handle->debug_print("fm24clxx: delay_ms is null.\n");                 /* delay_ms is null */

        return 3;                                                             /* return error */
    }

    if (handle->iic_init())                                                   /* iic init */
    {
        handle->debug_print("fm24clxx: iic init failed.\n");                  /* iic init failed */

        return 1;                                                             /* return error */
    }
    handle->inited = 1;                                                       /* flag finish initialization */

    return 0;                                                                 /* success return 0 */
}

/**
 * @brief     close the chip
 * @param[in] *handle points to a fm24clxx handle structure
 * @return    status code
 *            - 0 success
 *            - 1 iic deinit failed
 *            - 2 handle is NULL
 *            - 3 handle is not initialized
 * @note      none
 */
uint8_t fm24clxx_deinit(fm24clxx_handle_t *handle)
{
    if (handle == NULL)                                             /* check handle */
    {
        return 2;                                                   /* return error */
    }
    if (handle->inited != 1)                                        /* check handle initialization */
    {
        return 3;                                                   /* return error */
    }

    if (handle->iic_deinit())                                       /* iic deinit */
    {
        handle->debug_print("fm24clxx: iic deinit failed.\n");      /* iic deinit failed */

        return 1;                                                   /* return error */
    }
    handle->inited = 0;                                             /* flag close */

    return 0;                                                       /* success return 0 */
}

/**
 * @brief     set the chip type
 * @param[in] *handle points to a fm24clxx handle structure
 * @param[in] type is the chip type
 * @return    status code
 *            - 0 success
 *            - 2 handle is NULL
 * @note      none
 */
uint8_t fm24clxx_set_type(fm24clxx_handle_t *handle, fm24clxx_t type)
{
    if (handle == NULL)            /* check handle */
    {
        return 2;                  /* return error */
    }

    handle->id = type;             /* set id; the enum value doubles as the capacity in bytes */

    return 0;                      /* success return 0 */
}

/**
 * @brief      get the chip type
 * @param[in]  *handle points to a fm24clxx handle structure
 * @param[out] *type points to a chip type buffer
 * @return     status code
 *             - 0 success
 *             - 2 handle is NULL
 * @note       none
 */
uint8_t fm24clxx_get_type(fm24clxx_handle_t *handle, fm24clxx_t *type)
{
    if (handle == NULL)                        /* check handle */
    {
        return 2;                              /* return error */
    }

    *type = (fm24clxx_t)(handle->id);          /* get id */

    return 0;                                  /* success return 0 */
}

/**
 * @brief     set the chip address pin
 * @param[in] *handle points to a fm24clxx handle structure
 * @param[in] addr_pin is the chip address pin
 * @return    status code
 *            - 0 success
 *            - 2 handle is NULL
 * @note      none
 */
uint8_t fm24clxx_set_addr_pin(fm24clxx_handle_t *handle, fm24clxx_address_t addr_pin)
{
    if (handle == NULL)                        /* check handle */
    {
        return 2;                              /* return error */
    }

    handle->iic_addr = 0xA0;                   /* base 7-bit device address 1010xxx, write form */
    handle->iic_addr |= addr_pin << 1;         /* merge the A2..A0 pin state into the address */

    return 0;                                  /* success return 0 */
}

/**
 * @brief      get the chip address pin
 * @param[in]  *handle points to a fm24clxx handle structure
 * @param[out] *addr_pin points to a chip address pin
 * @return     status code
 *             - 0 success
 *             - 2 handle is NULL
 * @note       none
 */
uint8_t fm24clxx_get_addr_pin(fm24clxx_handle_t *handle, fm24clxx_address_t *addr_pin)
{
    if (handle == NULL)                                                        /* check handle */
    {
        return 2;                                                              /* return error */
    }

    *addr_pin = (fm24clxx_address_t)((handle->iic_addr & (~0xA0)) >> 1);       /* strip the 0xA0 base and recover the pin bits */

    return 0;                                                                  /* success return 0 */
}

/**
 * @brief      read bytes from the chip
 * @param[in]  *handle points to a fm24clxx handle structure
 * @param[in]  address is the register address
 * @param[out] *buf points to a data buffer
 * @param[in]  len is the buffer length
 * @return     status code
 *             - 0 success
 *             - 1 read data failed
 *             - 2 handle is NULL
 *             - 3 handle is not initialized
 *             - 4 end address is over the max address
 * @note       none
 */
uint8_t fm24clxx_read(fm24clxx_handle_t *handle, uint16_t address, uint8_t *buf, uint16_t len)
{
    uint8_t page_remain;

    if (handle == NULL)                                                                         /* check handle */
    {
        return 2;                                                                               /* return error */
    }
    if (handle->inited != 1)                                                                    /* check handle initialization */
    {
        return 3;                                                                               /* return error */
    }
    if ((address + len) > handle->id)                                                           /* the id enum value is the capacity in bytes */
    {
        handle->debug_print("fm24clxx: read out of range.\n");                                  /* read out of range */

        return 4;                                                                               /* return error */
    }

    page_remain = 8 - address % 8;                                                              /* bytes left in the current 8 byte page */
    if (len <= page_remain)                                                                     /* whole request fits in this page */
    {
        page_remain = len;                                                                      /* set page remain */
    }
    if (handle->id > FM24CL16B)                                                                 /* larger parts take a 16 bit data address */
    {
        while (1)
        {
            if (handle->iic_read_address16(handle->iic_addr, address,
                                           buf, page_remain))                                   /* read data */
            {
                handle->debug_print("fm24clxx: read failed.\n");                                /* read failed */

                return 1;                                                                       /* return error */
            }
            if (page_remain == len)                                                             /* last chunk transferred */
            {
                break;                                                                          /* break loop */
            }
            address += page_remain;                                                             /* address increase */
            buf += page_remain;                                                                 /* buffer point increase */
            len -= page_remain;                                                                 /* length decrease */
            page_remain = (len < 8) ? (uint8_t)len : 8;                                        /* next chunk is a full page or the tail */
        }
    }
    else                                                                                        /* small parts encode address bits 10:8 in the device address */
    {
        while (1)
        {
            if (handle->iic_read((uint8_t)(handle->iic_addr + ((address / 256) << 1)),
                                 (uint8_t)(address % 256), buf, page_remain))                   /* read page */
            {
                handle->debug_print("fm24clxx: read failed.\n");                                /* read failed */

                return 1;                                                                       /* return error */
            }
            if (page_remain == len)                                                             /* last chunk transferred */
            {
                break;                                                                          /* break loop */
            }
            address += page_remain;                                                             /* address increase */
            buf += page_remain;                                                                 /* buffer point increase */
            len -= page_remain;                                                                 /* length decrease */
            page_remain = (len < 8) ? (uint8_t)len : 8;                                        /* next chunk is a full page or the tail */
        }
    }

    return 0;                                                                                   /* success return 0 */
}

/**
 * @brief     write bytes to the chip
 * @param[in] *handle points to a fm24clxx handle structure
 * @param[in] address is the register address
 * @param[in] *buf points to a data buffer
 * @param[in] len is the buffer length
 * @return    status code
 *            - 0 success
 *            - 1 write data failed
 *            - 2 handle is NULL
 *            - 3 handle is not initialized
 *            - 4 end address is over the max address
 * @note      none
 */
uint8_t fm24clxx_write(fm24clxx_handle_t *handle, uint16_t address, uint8_t *buf, uint16_t len)
{
    uint8_t page_remain;

    if (handle == NULL)                                                                         /* check handle */
    {
        return 2;                                                                               /* return error */
    }
    if (handle->inited != 1)                                                                    /* check handle initialization */
    {
        return 3;                                                                               /* return error */
    }
    if ((address + len) > handle->id)                                                           /* the id enum value is the capacity in bytes */
    {
        handle->debug_print("fm24clxx: write out of range.\n");                                 /* write out of range */

        return 4;                                                                               /* fixed: was 1, documented code is 4 (matches fm24clxx_read) */
    }

    page_remain = 8 - address % 8;                                                              /* bytes left in the current 8 byte page */
    if (len <= page_remain)                                                                     /* whole request fits in this page */
    {
        page_remain = len;                                                                      /* set page remain */
    }
    if (handle->id > FM24CL16B)                                                                 /* larger parts take a 16 bit data address */
    {
        while (1)
        {
            if (handle->iic_write_address16(handle->iic_addr, address,
                                            buf, page_remain))                                  /* write data */
            {
                handle->debug_print("fm24clxx: write failed.\n");                               /* write failed */

                return 1;                                                                       /* return error */
            }
            if (page_remain == len)                                                             /* last chunk transferred */
            {
                break;                                                                          /* break */
            }
            address += page_remain;                                                             /* address increase */
            buf += page_remain;                                                                 /* buffer point increase */
            len -= page_remain;                                                                 /* length decrease */
            page_remain = (len < 8) ? (uint8_t)len : 8;                                        /* next chunk is a full page or the tail */
        }
    }
    else                                                                                        /* small parts encode address bits 10:8 in the device address */
    {
        while (1)
        {
            if (handle->iic_write((uint8_t)(handle->iic_addr + ((address / 256) << 1)),
                                  (uint8_t)(address % 256), buf, page_remain))                  /* write page */
            {
                handle->debug_print("fm24clxx: write failed.\n");                               /* write failed */

                return 1;                                                                       /* return error */
            }
            if (page_remain == len)                                                             /* last chunk transferred */
            {
                break;                                                                          /* break */
            }
            address += page_remain;                                                             /* address increase */
            buf += page_remain;                                                                 /* buffer point increase */
            len -= page_remain;                                                                 /* length decrease */
            page_remain = (len < 8) ? (uint8_t)len : 8;                                        /* next chunk is a full page or the tail */
        }
    }

    return 0;                                                                                   /* success return 0 */
}

/**
 * @brief      get chip's information
 * @param[out] *info points to a fm24clxx info structure
 * @return     status code
 *             - 0 success
 *             - 2 handle is NULL
 * @note       none
 */
uint8_t fm24clxx_info(fm24clxx_info_t *info)
{
    if (info == NULL)                                               /* check handle */
    {
        return 2;                                                   /* return error */
    }

    memset(info, 0, sizeof(fm24clxx_info_t));                       /* initialize fm24clxx info structure */
    strncpy(info->chip_name, CHIP_NAME, 32);                        /* copy chip name */
    strncpy(info->manufacturer_name, MANUFACTURER_NAME, 32);        /* copy manufacturer name */
    strncpy(info->interface, "IIC", 8);                             /* copy interface name */
    info->supply_voltage_min_v = SUPPLY_VOLTAGE_MIN;                /* set minimal supply voltage */
    info->supply_voltage_max_v = SUPPLY_VOLTAGE_MAX;                /* set maximum supply voltage */
    info->max_current_ma = MAX_CURRENT;                             /* set maximum current */
    info->temperature_max = TEMPERATURE_MAX;                        /* set maximum temperature */
    info->temperature_min = TEMPERATURE_MIN;                        /* set minimal temperature */
    info->driver_version = DRIVER_VERSION;                          /* set driver version */

    return 0;                                                       /* success return 0 */
}
192786.c
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <string.h> #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "bt.h" #include "esp_log.h" static const char *tag = "BLE_ADV"; #define HCI_H4_CMD_PREAMBLE_SIZE (4) /* HCI Command opcode group field(OGF) */ #define HCI_GRP_HOST_CONT_BASEBAND_CMDS (0x03 << 10) /* 0x0C00 */ #define HCI_GRP_BLE_CMDS (0x08 << 10) #define HCI_RESET (0x0003 | HCI_GRP_HOST_CONT_BASEBAND_CMDS) #define HCI_BLE_WRITE_ADV_ENABLE (0x000A | HCI_GRP_BLE_CMDS) #define HCI_BLE_WRITE_ADV_PARAMS (0x0006 | HCI_GRP_BLE_CMDS) #define HCI_BLE_WRITE_ADV_DATA (0x0008 | HCI_GRP_BLE_CMDS) #define HCIC_PARAM_SIZE_WRITE_ADV_ENABLE (1) #define HCIC_PARAM_SIZE_BLE_WRITE_ADV_PARAMS (15) #define HCIC_PARAM_SIZE_BLE_WRITE_ADV_DATA (31) #define BD_ADDR_LEN (6) /* Device address length */ typedef uint8_t bd_addr_t[BD_ADDR_LEN]; /* Device address */ #define UINT16_TO_STREAM(p, u16) {*(p)++ = (uint8_t)(u16); *(p)++ = (uint8_t)((u16) >> 8);} #define UINT8_TO_STREAM(p, u8) {*(p)++ = (uint8_t)(u8);} #define BDADDR_TO_STREAM(p, a) {int ijk; for (ijk = 0; ijk < BD_ADDR_LEN; ijk++) *(p)++ = (uint8_t) a[BD_ADDR_LEN - 1 - ijk];} #define ARRAY_TO_STREAM(p, a, len) {int ijk; for (ijk = 0; ijk < len; ijk++) *(p)++ = (uint8_t) a[ijk];} enum { H4_TYPE_COMMAND = 1, H4_TYPE_ACL = 2, H4_TYPE_SCO = 3, H4_TYPE_EVENT = 4 }; static uint8_t hci_cmd_buf[128]; /* * @brief: BT controller callback 
function, used to notify the upper layer that * controller is ready to receive command */ static void controller_rcv_pkt_ready(void) { printf("controller rcv pkt ready\n"); } /* * @brief: BT controller callback function, to transfer data packet to upper * controller is ready to receive command */ static int host_rcv_pkt(uint8_t *data, uint16_t len) { printf("host rcv pkt: "); for (uint16_t i = 0; i < len; i++) { printf("%02x", data[i]); } printf("\n"); return 0; } static esp_vhci_host_callback_t vhci_host_cb = { controller_rcv_pkt_ready, host_rcv_pkt }; static uint16_t make_cmd_reset(uint8_t *buf) { UINT8_TO_STREAM (buf, H4_TYPE_COMMAND); UINT16_TO_STREAM (buf, HCI_RESET); UINT8_TO_STREAM (buf, 0); return HCI_H4_CMD_PREAMBLE_SIZE; } static uint16_t make_cmd_ble_set_adv_enable (uint8_t *buf, uint8_t adv_enable) { UINT8_TO_STREAM (buf, H4_TYPE_COMMAND); UINT16_TO_STREAM (buf, HCI_BLE_WRITE_ADV_ENABLE); UINT8_TO_STREAM (buf, HCIC_PARAM_SIZE_WRITE_ADV_ENABLE); UINT8_TO_STREAM (buf, adv_enable); return HCI_H4_CMD_PREAMBLE_SIZE + HCIC_PARAM_SIZE_WRITE_ADV_ENABLE; } static uint16_t make_cmd_ble_set_adv_param (uint8_t *buf, uint16_t adv_int_min, uint16_t adv_int_max, uint8_t adv_type, uint8_t addr_type_own, uint8_t addr_type_dir, bd_addr_t direct_bda, uint8_t channel_map, uint8_t adv_filter_policy) { UINT8_TO_STREAM (buf, H4_TYPE_COMMAND); UINT16_TO_STREAM (buf, HCI_BLE_WRITE_ADV_PARAMS); UINT8_TO_STREAM (buf, HCIC_PARAM_SIZE_BLE_WRITE_ADV_PARAMS ); UINT16_TO_STREAM (buf, adv_int_min); UINT16_TO_STREAM (buf, adv_int_max); UINT8_TO_STREAM (buf, adv_type); UINT8_TO_STREAM (buf, addr_type_own); UINT8_TO_STREAM (buf, addr_type_dir); BDADDR_TO_STREAM (buf, direct_bda); UINT8_TO_STREAM (buf, channel_map); UINT8_TO_STREAM (buf, adv_filter_policy); return HCI_H4_CMD_PREAMBLE_SIZE + HCIC_PARAM_SIZE_BLE_WRITE_ADV_PARAMS; } static uint16_t make_cmd_ble_set_adv_data(uint8_t *buf, uint8_t data_len, uint8_t *p_data) { UINT8_TO_STREAM (buf, H4_TYPE_COMMAND); UINT16_TO_STREAM (buf, 
HCI_BLE_WRITE_ADV_DATA); UINT8_TO_STREAM (buf, HCIC_PARAM_SIZE_BLE_WRITE_ADV_DATA + 1); memset(buf, 0, HCIC_PARAM_SIZE_BLE_WRITE_ADV_DATA); if (p_data != NULL && data_len > 0) { if (data_len > HCIC_PARAM_SIZE_BLE_WRITE_ADV_DATA) { data_len = HCIC_PARAM_SIZE_BLE_WRITE_ADV_DATA; } UINT8_TO_STREAM (buf, data_len); ARRAY_TO_STREAM (buf, p_data, data_len); } return HCI_H4_CMD_PREAMBLE_SIZE + HCIC_PARAM_SIZE_BLE_WRITE_ADV_DATA + 1; } static void hci_cmd_send_reset(void) { uint16_t sz = make_cmd_reset (hci_cmd_buf); esp_vhci_host_send_packet(hci_cmd_buf, sz); } static void hci_cmd_send_ble_adv_start(void) { uint16_t sz = make_cmd_ble_set_adv_enable (hci_cmd_buf, 1); esp_vhci_host_send_packet(hci_cmd_buf, sz); } static void hci_cmd_send_ble_set_adv_param(void) { uint16_t adv_intv_min = 256; // 160ms uint16_t adv_intv_max = 256; // 160ms uint8_t adv_type = 0; // connectable undirected advertising (ADV_IND) uint8_t own_addr_type = 0; // Public Device Address uint8_t peer_addr_type = 0; // Public Device Address uint8_t peer_addr[6] = {0x80, 0x81, 0x82, 0x83, 0x84, 0x85}; uint8_t adv_chn_map = 0x07; // 37, 38, 39 uint8_t adv_filter_policy = 0; // Process All Conn and Scan uint16_t sz = make_cmd_ble_set_adv_param(hci_cmd_buf, adv_intv_min, adv_intv_max, adv_type, own_addr_type, peer_addr_type, peer_addr, adv_chn_map, adv_filter_policy); esp_vhci_host_send_packet(hci_cmd_buf, sz); } static void hci_cmd_send_ble_set_adv_data(void) { char *adv_name = "ESP-BLE-HELLO"; uint8_t name_len = (uint8_t)strlen(adv_name); uint8_t adv_data[31] = {0x02, 0x01, 0x06, 0x0, 0x09}; uint8_t adv_data_len; adv_data[3] = name_len + 1; for (int i = 0; i < name_len; i++) { adv_data[5 + i] = (uint8_t)adv_name[i]; } adv_data_len = 5 + name_len; uint16_t sz = make_cmd_ble_set_adv_data(hci_cmd_buf, adv_data_len, (uint8_t *)adv_data); esp_vhci_host_send_packet(hci_cmd_buf, sz); } /* * @brief: send HCI commands to perform BLE advertising; */ void bleAdvtTask(void *pvParameters) { int cmd_cnt = 0; bool 
send_avail = false; esp_vhci_host_register_callback(&vhci_host_cb); printf("BLE advt task start\n"); while (1) { vTaskDelay(1000 / portTICK_PERIOD_MS); send_avail = esp_vhci_host_check_send_available(); if (send_avail) { switch (cmd_cnt) { case 0: hci_cmd_send_reset(); ++cmd_cnt; break; case 1: hci_cmd_send_ble_set_adv_param(); ++cmd_cnt; break; case 2: hci_cmd_send_ble_set_adv_data(); ++cmd_cnt; break; case 3: hci_cmd_send_ble_adv_start(); ++cmd_cnt; break; } } printf("BLE Advertise, flag_send_avail: %d, cmd_sent: %d\n", send_avail, cmd_cnt); } } void app_main() { esp_bt_controller_config_t bt_cfg = BT_CONTROLLER_INIT_CONFIG_DEFAULT(); if (esp_bt_controller_init(&bt_cfg) != ESP_OK) { ESP_LOGI(tag, "Bluetooth controller initialize failed"); return; } if (esp_bt_controller_enable(ESP_BT_MODE_BTDM) != ESP_OK) { ESP_LOGI(tag, "Bluetooth controller enable failed"); return; } xTaskCreatePinnedToCore(&bleAdvtTask, "bleAdvtTask", 2048, NULL, 5, NULL, 0); }
173148.c
/**
* @license Apache-2.0
*
* Copyright (c) 2021 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*    http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "stdlib/ndarray/base/broadcast_shapes.h"
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Demo: broadcast (8,1,6,1) against (7,1,5) and print the resulting shape. */
int main() {
	// First input shape and its number of dimensions:
	int64_t nd_a = 4;
	int64_t shape_a[] = { 8, 1, 6, 1 };

	// Second input shape and its number of dimensions:
	int64_t nd_b = 3;
	int64_t shape_b[] = { 7, 1, 5 };

	// Pack both shapes for the broadcast routine:
	int64_t ndims[] = { nd_a, nd_b };
	int64_t *shapes[] = { shape_a, shape_b };

	// Output shape has max( nd_a, nd_b ) = 4 dimensions:
	int64_t out[] = { 0, 0, 0, 0 };

	// A non-zero status means the shapes cannot be broadcast:
	int8_t rc = stdlib_ndarray_broadcast_shapes( 2, shapes, ndims, out );
	if ( rc != 0 ) {
		printf( "incompatible shapes\n" );
		return 1;
	}

	// Print the broadcast shape as a tuple:
	printf( "shape = ( " );
	for ( int64_t j = 0; j < nd_a; j++ ) {
		printf( "%"PRId64"", out[ j ] );
		if ( j + 1 < nd_a ) {
			printf( ", " );
		}
	}
	printf( " )\n" );

	return 0;
}
866194.c
/* Copyright JS Foundation and other contributors, http://js.foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "ecma-builtin-helpers.h" #include "ecma-builtins.h" #if ENABLED (JERRY_ES2015) #define ECMA_BUILTINS_INTERNAL #include "ecma-builtins-internal.h" #define BUILTIN_INC_HEADER_NAME "ecma-builtin-array-prototype-unscopables.inc.h" #define BUILTIN_UNDERSCORED_ID array_prototype_unscopables #include "ecma-builtin-internal-routines-template.inc.h" #endif /* ENABLED (JERRY_ES2015) */
752170.c
#include "stm32f4.h"
#include "gpio.h"
#include "led.h"

/*
 * Drive an LED attached to GPIO port D.
 *
 * Writing the pin mask to BSRRL sets the pin high; writing it to
 * BSRRH resets the pin low. Both are write-only "bit set/reset"
 * registers, so no read/modify/write race is possible.
 */
void set_led(uint32_t led, uint8_t state)
{
    if (state) {
        GPIOD->BSRRL = led;   /* atomically set the selected pin(s) */
        return;
    }
    GPIOD->BSRRH = led;       /* atomically reset the selected pin(s) */
}
1001911.c
#include "kernel.h"
#include "file.h"
#include "cdev.h"

/* Global character-device switch table, indexed by major device number. */
struct cdevsw cdevsw[NCDEV];

/*
 * Install a driver's read/write handlers into the cdevsw table slot
 * for the given major number.
 *
 * Fix: the original indexed cdevsw[] with an unchecked caller-supplied
 * major number (out-of-bounds write for major < 0 or >= NCDEV) and
 * dereferenced cdev without a NULL check. Invalid registrations are
 * now silently ignored.
 */
void register_cdev(int major, struct cdevsw *cdev)
{
    if (major < 0 || major >= NCDEV || cdev == NULL) {
        return;   /* refuse invalid slot or missing driver ops */
    }
    cdevsw[major].read = cdev->read;
    cdevsw[major].write = cdev->write;
}
775636.c
/* smbk5pwd.c - Overlay for managing Samba and Heimdal passwords */ /* $OpenLDAP$ */ /* This work is part of OpenLDAP Software <http://www.openldap.org/>. * * Copyright 2004-2018 The OpenLDAP Foundation. * Portions Copyright 2004-2005 by Howard Chu, Symas Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted only as authorized by the OpenLDAP * Public License. * * A copy of this license is available in the file LICENSE in the * top-level directory of the distribution or, alternatively, at * <http://www.OpenLDAP.org/license.html>. */ /* ACKNOWLEDGEMENTS: * Support for table-driven configuration added by Pierangelo Masarati. * Support for sambaPwdMustChange and sambaPwdCanChange added by Marco D'Ettorre. * Support for shadowLastChange added by SATOH Fumiyasu @ OSS Technology, Inc. */ #include <portable.h> #ifndef SLAPD_OVER_SMBK5PWD #define SLAPD_OVER_SMBK5PWD SLAPD_MOD_DYNAMIC #endif #ifdef SLAPD_OVER_SMBK5PWD #include <slap.h> #include <ac/errno.h> #include <ac/string.h> #include "config.h" #ifdef DO_KRB5 #include <lber.h> #include <lber_pvt.h> #include <lutil.h> /* make ASN1_MALLOC_ENCODE use our allocator */ #define malloc ch_malloc #include <krb5.h> #include <kadm5/admin.h> #include <hdb.h> #ifndef HDB_INTERFACE_VERSION #define HDB_MASTER_KEY_SET master_key_set #else #define HDB_MASTER_KEY_SET hdb_master_key_set #endif static krb5_context context; static void *kadm_context; static kadm5_config_params conf; static HDB *db; static AttributeDescription *ad_krb5Key; static AttributeDescription *ad_krb5KeyVersionNumber; static AttributeDescription *ad_krb5PrincipalName; static AttributeDescription *ad_krb5ValidEnd; static ObjectClass *oc_krb5KDCEntry; #endif #ifdef DO_SAMBA #ifdef HAVE_GNUTLS #include <nettle/des.h> #include <nettle/md4.h> typedef unsigned char DES_cblock[8]; #elif HAVE_OPENSSL #include <openssl/des.h> #include <openssl/md4.h> #else #error Unsupported crypto backend. 
#endif #include "ldap_utf8.h" static AttributeDescription *ad_sambaLMPassword; static AttributeDescription *ad_sambaNTPassword; static AttributeDescription *ad_sambaPwdLastSet; static AttributeDescription *ad_sambaPwdMustChange; static AttributeDescription *ad_sambaPwdCanChange; static ObjectClass *oc_sambaSamAccount; #endif #ifdef DO_SHADOW static AttributeDescription *ad_shadowLastChange; static ObjectClass *oc_shadowAccount; #endif /* Per-instance configuration information */ typedef struct smbk5pwd_t { unsigned mode; #define SMBK5PWD_F_KRB5 (0x1U) #define SMBK5PWD_F_SAMBA (0x2U) #define SMBK5PWD_F_SHADOW (0x4U) #define SMBK5PWD_DO_KRB5(pi) ((pi)->mode & SMBK5PWD_F_KRB5) #define SMBK5PWD_DO_SAMBA(pi) ((pi)->mode & SMBK5PWD_F_SAMBA) #define SMBK5PWD_DO_SHADOW(pi) ((pi)->mode & SMBK5PWD_F_SHADOW) #ifdef DO_KRB5 /* nothing yet */ #endif #ifdef DO_SAMBA /* How many seconds before forcing a password change? */ time_t smb_must_change; /* How many seconds after allowing a password change? */ time_t smb_can_change; #endif #ifdef DO_SHADOW /* nothing yet */ #endif } smbk5pwd_t; static const unsigned SMBK5PWD_F_ALL = 0 #ifdef DO_KRB5 | SMBK5PWD_F_KRB5 #endif #ifdef DO_SAMBA | SMBK5PWD_F_SAMBA #endif #ifdef DO_SHADOW | SMBK5PWD_F_SHADOW #endif ; static int smbk5pwd_modules_init( smbk5pwd_t *pi ); #ifdef DO_SAMBA static const char hex[] = "0123456789abcdef"; /* From liblutil/passwd.c... 
 */

/* Expand a 7-byte half of an LM password into an 8-byte DES key,
 * spreading the 56 input bits so each key byte has room for a parity bit.
 * (OpenSSL additionally wants the parity bits set; GnuTLS ignores them.) */
static void lmPasswd_to_key(
	const char *lmPasswd,
	DES_cblock *key)
{
	const unsigned char *lpw = (const unsigned char *)lmPasswd;
	unsigned char *k = (unsigned char *)key;

	/* make room for parity bits */
	k[0] = lpw[0];
	k[1] = ((lpw[0]&0x01)<<7) | (lpw[1]>>1);
	k[2] = ((lpw[1]&0x03)<<6) | (lpw[2]>>2);
	k[3] = ((lpw[2]&0x07)<<5) | (lpw[3]>>3);
	k[4] = ((lpw[3]&0x0F)<<4) | (lpw[4]>>4);
	k[5] = ((lpw[4]&0x1F)<<3) | (lpw[5]>>5);
	k[6] = ((lpw[5]&0x3F)<<2) | (lpw[6]>>6);
	k[7] = ((lpw[6]&0x7F)<<1);

#ifdef HAVE_OPENSSL
	DES_set_odd_parity( key );
#endif
}

#define MAX_PWLEN 256
#define	HASHLEN	16

/* Render a HASHLEN-byte binary hash as a freshly allocated (ch_malloc)
 * NUL-terminated lowercase hex berval of length HASHLEN*2. */
static void hexify(
	const char in[HASHLEN],
	struct berval *out )
{
	int i;
	char *a;
	unsigned char *b;

	out->bv_val = ch_malloc(HASHLEN*2 + 1);
	out->bv_len = HASHLEN*2;

	a = out->bv_val;
	b = (unsigned char *)in;
	for (i=0; i<HASHLEN; i++) {
		*a++ = hex[*b >> 4];
		*a++ = hex[*b++ & 0x0f];
	}
	*a++ = '\0';
}

/* Compute the LanManager hash of a password: uppercase, truncate/pad to
 * 14 bytes, DES-encrypt the constant "KGS!@#$%" with each 7-byte half as
 * key, and hexify the two 8-byte cipher blocks. */
static void lmhash(
	struct berval *passwd,
	struct berval *hash )
{
	char UcasePassword[15];
	DES_cblock key;
	DES_cblock StdText = "KGS!@#$%";
	DES_cblock hbuf[2];
#ifdef HAVE_OPENSSL
	DES_key_schedule schedule;
#elif defined(HAVE_GNUTLS)
	struct des_ctx ctx;
#endif

	/* NOTE(review): strncpy here relies on the explicit [14]='\0' below
	 * for termination when the password is >= 14 bytes */
	strncpy( UcasePassword, passwd->bv_val, 14 );
	UcasePassword[14] = '\0';
	ldap_pvt_str2upper( UcasePassword );

	lmPasswd_to_key( UcasePassword, &key );
#ifdef HAVE_GNUTLS
	des_set_key( &ctx, key );
	des_encrypt( &ctx, sizeof(key), hbuf[0], StdText );

	lmPasswd_to_key( &UcasePassword[7], &key );
	des_set_key( &ctx, key );
	des_encrypt( &ctx, sizeof(key), hbuf[1], StdText );
#elif defined(HAVE_OPENSSL)
	DES_set_key_unchecked( &key, &schedule );
	DES_ecb_encrypt( &StdText, &hbuf[0], &schedule , DES_ENCRYPT );

	lmPasswd_to_key( &UcasePassword[7], &key );
	DES_set_key_unchecked( &key, &schedule );
	DES_ecb_encrypt( &StdText, &hbuf[1], &schedule , DES_ENCRYPT );
#endif

	hexify( (char *)hbuf, hash );
}

/* Compute the NT hash: MD4 over the UCS2-encoded password, hexified.
 * Windows currently only allows 14 character passwords, but
 * may support up to 256 in the future. We assume this means
 * 256 UCS2 characters, not 256 bytes... */
static void nthash(
	struct berval *passwd,
	struct berval *hash )
{
	char hbuf[HASHLEN];
#ifdef HAVE_OPENSSL
	MD4_CTX ctx;
#elif defined(HAVE_GNUTLS)
	struct md4_ctx ctx;
#endif

	if (passwd->bv_len > MAX_PWLEN*2)
		passwd->bv_len = MAX_PWLEN*2;

#ifdef HAVE_OPENSSL
	MD4_Init( &ctx );
	MD4_Update( &ctx, passwd->bv_val, passwd->bv_len );
	MD4_Final( (unsigned char *)hbuf, &ctx );
#elif defined(HAVE_GNUTLS)
	md4_init( &ctx );
	md4_update( &ctx, passwd->bv_len, (unsigned char *)passwd->bv_val );
	md4_digest( &ctx, sizeof(hbuf), (unsigned char *)hbuf );
#endif

	hexify( hbuf, hash );
}
#endif /* DO_SAMBA */

#ifdef DO_KRB5

/* Operation-completion callback: drop the thread-local Operation pointer
 * stashed by smbk5pwd_op_bind and free the callback structure itself. */
static int smbk5pwd_op_cleanup(
	Operation *op,
	SlapReply *rs )
{
	slap_callback *cb;

	/* clear out the current key */
	ldap_pvt_thread_pool_setkey( op->o_threadctx, smbk5pwd_op_cleanup,
		NULL, 0, NULL, NULL );

	/* free the callback */
	cb = op->o_callback;
	op->o_callback = cb->sc_next;
	op->o_tmpfree( cb, op->o_tmpmemctx );
	return 0;
}

/* Bind-time hook: for simple Binds, stash the Operation in thread-local
 * storage so the {K5KEY} password checker can retrieve it, and register
 * smbk5pwd_op_cleanup to clear it when the Bind finishes. */
static int smbk5pwd_op_bind(
	Operation *op,
	SlapReply *rs )
{
	/* If this is a simple Bind, stash the Op pointer so our chk
	 * function can find it. Set a cleanup callback to clear it
	 * out when the Bind completes.
	 */
	if ( op->oq_bind.rb_method == LDAP_AUTH_SIMPLE ) {
		slap_callback *cb;
		ldap_pvt_thread_pool_setkey( op->o_threadctx, smbk5pwd_op_cleanup, op,
			0, NULL, NULL );
		cb = op->o_tmpcalloc( 1, sizeof(slap_callback), op->o_tmpmemctx );
		cb->sc_cleanup = smbk5pwd_op_cleanup;
		cb->sc_next = op->o_callback;
		op->o_callback = cb;
	}
	return SLAP_CB_CONTINUE;
}

static LUTIL_PASSWD_CHK_FUNC k5key_chk;
static LUTIL_PASSWD_HASH_FUNC k5key_hash;
static const struct berval k5key_scheme = BER_BVC("{K5KEY}");

/* This password scheme stores no data in the userPassword attribute
 * other than the scheme name. It assumes the invoking entry is a
 * krb5KDCentry and compares the passed-in credentials against the
 * krb5Key attribute.
The krb5Key may be multi-valued, but they are * simply multiple keytypes generated from the same input string, so * only the first value needs to be compared here. * * Since the lutil_passwd API doesn't pass the Entry object in, we * have to fetch it ourselves in order to get access to the other * attributes. We accomplish this with the help of the overlay's Bind * function, which stores the current Operation pointer in thread-specific * storage so we can retrieve it here. The Operation provides all * the necessary context for us to get Entry from the database. */ static int k5key_chk( const struct berval *sc, const struct berval *passwd, const struct berval *cred, const char **text ) { void *ctx, *op_tmp; Operation *op; int rc; Entry *e; Attribute *a; krb5_error_code ret; krb5_keyblock key; krb5_salt salt; hdb_entry ent; /* Find our thread context, find our Operation */ ctx = ldap_pvt_thread_pool_context(); if ( ldap_pvt_thread_pool_getkey( ctx, smbk5pwd_op_cleanup, &op_tmp, NULL ) || !op_tmp ) return LUTIL_PASSWD_ERR; op = op_tmp; rc = be_entry_get_rw( op, &op->o_req_ndn, NULL, NULL, 0, &e ); if ( rc != LDAP_SUCCESS ) return LUTIL_PASSWD_ERR; rc = LUTIL_PASSWD_ERR; do { size_t l; Key ekey = {0}; a = attr_find( e->e_attrs, ad_krb5PrincipalName ); if (!a ) break; memset( &ent, 0, sizeof(ent) ); ret = krb5_parse_name(context, a->a_vals[0].bv_val, &ent.principal); if ( ret ) break; a = attr_find( e->e_attrs, ad_krb5ValidEnd ); if (a) { struct lutil_tm tm; struct lutil_timet tt; if ( lutil_parsetime( a->a_vals[0].bv_val, &tm ) == 0 && lutil_tm2time( &tm, &tt ) == 0 && tt.tt_usec < op->o_time ) { /* Account is expired */ rc = LUTIL_PASSWD_ERR; break; } } krb5_get_pw_salt( context, ent.principal, &salt ); krb5_free_principal( context, ent.principal ); a = attr_find( e->e_attrs, ad_krb5Key ); if ( !a ) break; ent.keys.len = 1; ent.keys.val = &ekey; decode_Key((unsigned char *) a->a_vals[0].bv_val, (size_t) a->a_vals[0].bv_len, &ent.keys.val[0], &l); if ( 
db->HDB_MASTER_KEY_SET ) hdb_unseal_keys( context, db, &ent ); krb5_string_to_key_salt( context, ekey.key.keytype, cred->bv_val, salt, &key ); krb5_free_salt( context, salt ); if ( memcmp( ekey.key.keyvalue.data, key.keyvalue.data, key.keyvalue.length ) == 0 ) rc = LUTIL_PASSWD_OK; krb5_free_keyblock_contents( context, &key ); krb5_free_keyblock_contents( context, &ekey.key ); } while(0); be_entry_release_r( op, e ); return rc; } static int k5key_hash( const struct berval *scheme, const struct berval *passwd, struct berval *hash, const char **text ) { ber_dupbv( hash, (struct berval *)&k5key_scheme ); return LUTIL_PASSWD_OK; } #endif /* DO_KRB5 */ static int smbk5pwd_exop_passwd( Operation *op, SlapReply *rs ) { int rc; req_pwdexop_s *qpw = &op->oq_pwdexop; Entry *e; Modifications *ml; slap_overinst *on = (slap_overinst *)op->o_bd->bd_info; smbk5pwd_t *pi = on->on_bi.bi_private; char term; /* Not the operation we expected, pass it on... */ if ( ber_bvcmp( &slap_EXOP_MODIFY_PASSWD, &op->ore_reqoid ) ) { return SLAP_CB_CONTINUE; } op->o_bd->bd_info = (BackendInfo *)on->on_info; rc = be_entry_get_rw( op, &op->o_req_ndn, NULL, NULL, 0, &e ); if ( rc != LDAP_SUCCESS ) return rc; term = qpw->rs_new.bv_val[qpw->rs_new.bv_len]; qpw->rs_new.bv_val[qpw->rs_new.bv_len] = '\0'; #ifdef DO_KRB5 /* Kerberos stuff */ do { krb5_error_code ret; hdb_entry ent; struct berval *keys; size_t nkeys; int kvno, i; Attribute *a; if ( !SMBK5PWD_DO_KRB5( pi ) ) break; if ( !is_entry_objectclass(e, oc_krb5KDCEntry, 0 ) ) break; a = attr_find( e->e_attrs, ad_krb5PrincipalName ); if ( !a ) break; memset( &ent, 0, sizeof(ent) ); ret = krb5_parse_name(context, a->a_vals[0].bv_val, &ent.principal); if ( ret ) break; a = attr_find( e->e_attrs, ad_krb5KeyVersionNumber ); kvno = 0; if ( a ) { if ( lutil_atoi( &kvno, a->a_vals[0].bv_val ) != 0 ) { Debug( LDAP_DEBUG_ANY, "%s smbk5pwd EXOP: " "dn=\"%s\" unable to parse krb5KeyVersionNumber=\"%s\"\n", op->o_log_prefix, e->e_name.bv_val, a->a_vals[0].bv_val 
); } } else { /* shouldn't happen, this is a required attr */ Debug( LDAP_DEBUG_ANY, "%s smbk5pwd EXOP: " "dn=\"%s\" missing krb5KeyVersionNumber\n", op->o_log_prefix, e->e_name.bv_val, 0 ); } ret = hdb_generate_key_set_password(context, ent.principal, qpw->rs_new.bv_val, &ent.keys.val, &nkeys); ent.keys.len = nkeys; hdb_seal_keys(context, db, &ent); krb5_free_principal( context, ent.principal ); keys = ch_malloc( (ent.keys.len + 1) * sizeof(struct berval)); for (i = 0; i < ent.keys.len; i++) { unsigned char *buf; size_t len; ASN1_MALLOC_ENCODE(Key, buf, len, &ent.keys.val[i], &len, ret); if (ret != 0) break; keys[i].bv_val = (char *)buf; keys[i].bv_len = len; } BER_BVZERO( &keys[i] ); hdb_free_keys(context, ent.keys.len, ent.keys.val); if ( i != ent.keys.len ) { ber_bvarray_free( keys ); break; } ml = ch_malloc(sizeof(Modifications)); if (!qpw->rs_modtail) qpw->rs_modtail = &ml->sml_next; ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; ml->sml_desc = ad_krb5Key; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = i; ml->sml_values = keys; ml->sml_nvalues = NULL; ml = ch_malloc(sizeof(Modifications)); ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; ml->sml_desc = ad_krb5KeyVersionNumber; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = 1; ml->sml_values = ch_malloc( 2 * sizeof(struct berval)); ml->sml_values[0].bv_val = ch_malloc( 64 ); ml->sml_values[0].bv_len = sprintf(ml->sml_values[0].bv_val, "%d", kvno+1 ); BER_BVZERO( &ml->sml_values[1] ); ml->sml_nvalues = NULL; } while ( 0 ); #endif /* DO_KRB5 */ #ifdef DO_SAMBA /* Samba stuff */ if ( SMBK5PWD_DO_SAMBA( pi ) && is_entry_objectclass(e, oc_sambaSamAccount, 0 ) ) { struct berval *keys; ber_len_t j,l; wchar_t *wcs, wc; char *c, *d; struct berval pwd; /* Expand incoming UTF8 string to UCS4 */ l = ldap_utf8_chars(qpw->rs_new.bv_val); wcs = ch_malloc((l+1) * sizeof(wchar_t)); 
ldap_x_utf8s_to_wcs( wcs, qpw->rs_new.bv_val, l ); /* Truncate UCS4 to UCS2 */ c = (char *)wcs; for (j=0; j<l; j++) { wc = wcs[j]; *c++ = wc & 0xff; *c++ = (wc >> 8) & 0xff; } *c++ = 0; pwd.bv_val = (char *)wcs; pwd.bv_len = l * 2; ml = ch_malloc(sizeof(Modifications)); if (!qpw->rs_modtail) qpw->rs_modtail = &ml->sml_next; ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; keys = ch_malloc( 2 * sizeof(struct berval) ); BER_BVZERO( &keys[1] ); nthash( &pwd, keys ); ml->sml_desc = ad_sambaNTPassword; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = 1; ml->sml_values = keys; ml->sml_nvalues = NULL; /* Truncate UCS2 to 8-bit ASCII */ c = pwd.bv_val+1; d = pwd.bv_val+2; for (j=1; j<l; j++) { *c++ = *d++; d++; } pwd.bv_len /= 2; pwd.bv_val[pwd.bv_len] = '\0'; ml = ch_malloc(sizeof(Modifications)); ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; keys = ch_malloc( 2 * sizeof(struct berval) ); BER_BVZERO( &keys[1] ); lmhash( &pwd, keys ); ml->sml_desc = ad_sambaLMPassword; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = 1; ml->sml_values = keys; ml->sml_nvalues = NULL; ch_free(wcs); ml = ch_malloc(sizeof(Modifications)); ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; keys = ch_malloc( 2 * sizeof(struct berval) ); keys[0].bv_val = ch_malloc( LDAP_PVT_INTTYPE_CHARS(long) ); keys[0].bv_len = snprintf(keys[0].bv_val, LDAP_PVT_INTTYPE_CHARS(long), "%ld", slap_get_time()); BER_BVZERO( &keys[1] ); ml->sml_desc = ad_sambaPwdLastSet; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = 1; ml->sml_values = keys; ml->sml_nvalues = NULL; if (pi->smb_must_change) { ml = ch_malloc(sizeof(Modifications)); ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; keys = ch_malloc( 2 * sizeof(struct berval) ); keys[0].bv_val = ch_malloc( LDAP_PVT_INTTYPE_CHARS(long) ); keys[0].bv_len = 
snprintf(keys[0].bv_val, LDAP_PVT_INTTYPE_CHARS(long), "%ld", slap_get_time() + pi->smb_must_change); BER_BVZERO( &keys[1] ); ml->sml_desc = ad_sambaPwdMustChange; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = 1; ml->sml_values = keys; ml->sml_nvalues = NULL; } if (pi->smb_can_change) { ml = ch_malloc(sizeof(Modifications)); ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; keys = ch_malloc( 2 * sizeof(struct berval) ); keys[0].bv_val = ch_malloc( LDAP_PVT_INTTYPE_CHARS(long) ); keys[0].bv_len = snprintf(keys[0].bv_val, LDAP_PVT_INTTYPE_CHARS(long), "%ld", slap_get_time() + pi->smb_can_change); BER_BVZERO( &keys[1] ); ml->sml_desc = ad_sambaPwdCanChange; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = 1; ml->sml_values = keys; ml->sml_nvalues = NULL; } } #endif /* DO_SAMBA */ #ifdef DO_SHADOW /* shadow stuff */ if ( SMBK5PWD_DO_SHADOW( pi ) && is_entry_objectclass(e, oc_shadowAccount, 0 ) ) { struct berval *keys; ml = ch_malloc(sizeof(Modifications)); if (!qpw->rs_modtail) qpw->rs_modtail = &ml->sml_next; ml->sml_next = qpw->rs_mods; qpw->rs_mods = ml; keys = ch_malloc( sizeof(struct berval) * 2); BER_BVZERO( &keys[1] ); keys[0].bv_val = ch_malloc( LDAP_PVT_INTTYPE_CHARS(long) ); keys[0].bv_len = snprintf(keys[0].bv_val, LDAP_PVT_INTTYPE_CHARS(long), "%ld", (long)(slap_get_time() / (60 * 60 * 24))); ml->sml_desc = ad_shadowLastChange; ml->sml_op = LDAP_MOD_REPLACE; #ifdef SLAP_MOD_INTERNAL ml->sml_flags = SLAP_MOD_INTERNAL; #endif ml->sml_numvals = 1; ml->sml_values = keys; ml->sml_nvalues = NULL; } #endif /* DO_SHADOW */ be_entry_release_r( op, e ); qpw->rs_new.bv_val[qpw->rs_new.bv_len] = term; return SLAP_CB_CONTINUE; } static slap_overinst smbk5pwd; /* back-config stuff */ enum { PC_SMB_MUST_CHANGE = 1, PC_SMB_CAN_CHANGE, PC_SMB_ENABLE }; static ConfigDriver smbk5pwd_cf_func; /* * NOTE: uses OID arcs OLcfgCtAt:1 and OLcfgCtOc:1 
*/ static ConfigTable smbk5pwd_cfats[] = { { "smbk5pwd-enable", "arg", 2, 0, 0, ARG_MAGIC|PC_SMB_ENABLE, smbk5pwd_cf_func, "( OLcfgCtAt:1.1 NAME 'olcSmbK5PwdEnable' " "DESC 'Modules to be enabled' " "SYNTAX OMsDirectoryString )", NULL, NULL }, { "smbk5pwd-must-change", "time", 2, 2, 0, ARG_MAGIC|ARG_INT|PC_SMB_MUST_CHANGE, smbk5pwd_cf_func, "( OLcfgCtAt:1.2 NAME 'olcSmbK5PwdMustChange' " "DESC 'Credentials validity interval' " "SYNTAX OMsInteger SINGLE-VALUE )", NULL, NULL }, { "smbk5pwd-can-change", "time", 2, 2, 0, ARG_MAGIC|ARG_INT|PC_SMB_CAN_CHANGE, smbk5pwd_cf_func, "( OLcfgCtAt:1.3 NAME 'olcSmbK5PwdCanChange' " "DESC 'Credentials minimum validity interval' " "SYNTAX OMsInteger SINGLE-VALUE )", NULL, NULL }, { NULL, NULL, 0, 0, 0, ARG_IGNORED } }; static ConfigOCs smbk5pwd_cfocs[] = { { "( OLcfgCtOc:1.1 " "NAME 'olcSmbK5PwdConfig' " "DESC 'smbk5pwd overlay configuration' " "SUP olcOverlayConfig " "MAY ( " "olcSmbK5PwdEnable " "$ olcSmbK5PwdMustChange " "$ olcSmbK5PwdCanChange " ") )", Cft_Overlay, smbk5pwd_cfats }, { NULL, 0, NULL } }; /* * add here other functionalities; handle their initialization * as appropriate in smbk5pwd_modules_init(). */ static slap_verbmasks smbk5pwd_modules[] = { { BER_BVC( "krb5" ), SMBK5PWD_F_KRB5 }, { BER_BVC( "samba" ), SMBK5PWD_F_SAMBA }, { BER_BVC( "shadow" ), SMBK5PWD_F_SHADOW }, { BER_BVNULL, -1 } }; static int smbk5pwd_cf_func( ConfigArgs *c ) { slap_overinst *on = (slap_overinst *)c->bi; int rc = 0; smbk5pwd_t *pi = on->on_bi.bi_private; if ( c->op == SLAP_CONFIG_EMIT ) { switch( c->type ) { case PC_SMB_MUST_CHANGE: #ifdef DO_SAMBA c->value_int = pi->smb_must_change; #else /* ! DO_SAMBA */ c->value_int = 0; #endif /* ! DO_SAMBA */ break; case PC_SMB_CAN_CHANGE: #ifdef DO_SAMBA c->value_int = pi->smb_can_change; #else /* ! DO_SAMBA */ c->value_int = 0; #endif /* ! 
DO_SAMBA */ break; case PC_SMB_ENABLE: c->rvalue_vals = NULL; if ( pi->mode ) { mask_to_verbs( smbk5pwd_modules, pi->mode, &c->rvalue_vals ); if ( c->rvalue_vals == NULL ) { rc = 1; } } break; default: assert( 0 ); rc = 1; } return rc; } else if ( c->op == LDAP_MOD_DELETE ) { switch( c->type ) { case PC_SMB_MUST_CHANGE: break; case PC_SMB_CAN_CHANGE: break; case PC_SMB_ENABLE: if ( !c->line ) { pi->mode = 0; } else { int i; i = verb_to_mask( c->line, smbk5pwd_modules ); pi->mode &= ~smbk5pwd_modules[i].mask; } break; default: assert( 0 ); rc = 1; } return rc; } switch( c->type ) { case PC_SMB_MUST_CHANGE: #ifdef DO_SAMBA if ( c->value_int < 0 ) { Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> invalid negative value \"%d\".", c->log, c->argv[ 0 ], 0 ); return 1; } pi->smb_must_change = c->value_int; #else /* ! DO_SAMBA */ Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> only meaningful " "when compiled with -DDO_SAMBA.\n", c->log, c->argv[ 0 ], 0 ); return 1; #endif /* ! DO_SAMBA */ break; case PC_SMB_CAN_CHANGE: #ifdef DO_SAMBA if ( c->value_int < 0 ) { Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> invalid negative value \"%d\".", c->log, c->argv[ 0 ], 0 ); return 1; } pi->smb_can_change = c->value_int; #else /* ! DO_SAMBA */ Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> only meaningful " "when compiled with -DDO_SAMBA.\n", c->log, c->argv[ 0 ], 0 ); return 1; #endif /* ! DO_SAMBA */ break; case PC_SMB_ENABLE: { slap_mask_t mode = pi->mode, m = 0; rc = verbs_to_mask( c->argc, c->argv, smbk5pwd_modules, &m ); if ( rc > 0 ) { Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> unknown module \"%s\".\n", c->log, c->argv[ 0 ], c->argv[ rc ] ); return 1; } /* we can hijack the smbk5pwd_t structure because * from within the configuration, this is the only * active thread. 
*/ pi->mode |= m; #ifndef DO_KRB5 if ( SMBK5PWD_DO_KRB5( pi ) ) { Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> module \"%s\" only allowed when compiled with -DDO_KRB5.\n", c->log, c->argv[ 0 ], c->argv[ rc ] ); pi->mode = mode; return 1; } #endif /* ! DO_KRB5 */ #ifndef DO_SAMBA if ( SMBK5PWD_DO_SAMBA( pi ) ) { Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> module \"%s\" only allowed when compiled with -DDO_SAMBA.\n", c->log, c->argv[ 0 ], c->argv[ rc ] ); pi->mode = mode; return 1; } #endif /* ! DO_SAMBA */ #ifndef DO_SHADOW if ( SMBK5PWD_DO_SHADOW( pi ) ) { Debug( LDAP_DEBUG_ANY, "%s: smbk5pwd: " "<%s> module \"%s\" only allowed when compiled with -DDO_SHADOW.\n", c->log, c->argv[ 0 ], c->argv[ rc ] ); pi->mode = mode; return 1; } #endif /* ! DO_SHADOW */ /* Re-initialize the module, because * the configuration might have changed */ rc = smbk5pwd_modules_init( pi ); if ( rc ) { pi->mode = mode; return 1; } } break; default: assert( 0 ); return 1; } return rc; } static int smbk5pwd_modules_init( smbk5pwd_t *pi ) { static struct { const char *name; AttributeDescription **adp; } #ifdef DO_KRB5 krb5_ad[] = { { "krb5Key", &ad_krb5Key }, { "krb5KeyVersionNumber", &ad_krb5KeyVersionNumber }, { "krb5PrincipalName", &ad_krb5PrincipalName }, { "krb5ValidEnd", &ad_krb5ValidEnd }, { NULL } }, #endif /* DO_KRB5 */ #ifdef DO_SAMBA samba_ad[] = { { "sambaLMPassword", &ad_sambaLMPassword }, { "sambaNTPassword", &ad_sambaNTPassword }, { "sambaPwdLastSet", &ad_sambaPwdLastSet }, { "sambaPwdMustChange", &ad_sambaPwdMustChange }, { "sambaPwdCanChange", &ad_sambaPwdCanChange }, { NULL } }, #endif /* DO_SAMBA */ #ifdef DO_SHADOW shadow_ad[] = { { "shadowLastChange", &ad_shadowLastChange }, { NULL } }, #endif /* DO_SHADOW */ dummy_ad; /* this is to silence the unused var warning */ (void) dummy_ad; #ifdef DO_KRB5 if ( SMBK5PWD_DO_KRB5( pi ) && oc_krb5KDCEntry == NULL ) { krb5_error_code ret; extern HDB *_kadm5_s_get_db(void *); int i, rc; /* Make sure all of our necessary schema items 
are loaded */ oc_krb5KDCEntry = oc_find( "krb5KDCEntry" ); if ( !oc_krb5KDCEntry ) { Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to find \"krb5KDCEntry\" objectClass.\n", 0, 0, 0 ); return -1; } for ( i = 0; krb5_ad[ i ].name != NULL; i++ ) { const char *text; *(krb5_ad[ i ].adp) = NULL; rc = slap_str2ad( krb5_ad[ i ].name, krb5_ad[ i ].adp, &text ); if ( rc != LDAP_SUCCESS ) { Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to find \"%s\" attributeType: %s (%d).\n", krb5_ad[ i ].name, text, rc ); oc_krb5KDCEntry = NULL; return rc; } } /* Initialize Kerberos context */ ret = krb5_init_context(&context); if (ret) { Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to initialize krb5 context (%d).\n", ret, 0, 0 ); oc_krb5KDCEntry = NULL; return -1; } ret = kadm5_s_init_with_password_ctx( context, KADM5_ADMIN_SERVICE, NULL, KADM5_ADMIN_SERVICE, &conf, 0, 0, &kadm_context ); if (ret) { char *err_str, *err_msg = "<unknown error>"; err_str = krb5_get_error_string( context ); if (!err_str) err_msg = (char *)krb5_get_err_text( context, ret ); Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to initialize krb5 admin context: %s (%d).\n", err_str ? 
err_str : err_msg, ret, 0 ); if (err_str) krb5_free_error_string( context, err_str ); krb5_free_context( context ); oc_krb5KDCEntry = NULL; return -1; } db = _kadm5_s_get_db( kadm_context ); } #endif /* DO_KRB5 */ #ifdef DO_SAMBA if ( SMBK5PWD_DO_SAMBA( pi ) && oc_sambaSamAccount == NULL ) { int i, rc; oc_sambaSamAccount = oc_find( "sambaSamAccount" ); if ( !oc_sambaSamAccount ) { Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to find \"sambaSamAccount\" objectClass.\n", 0, 0, 0 ); return -1; } for ( i = 0; samba_ad[ i ].name != NULL; i++ ) { const char *text; *(samba_ad[ i ].adp) = NULL; rc = slap_str2ad( samba_ad[ i ].name, samba_ad[ i ].adp, &text ); if ( rc != LDAP_SUCCESS ) { Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to find \"%s\" attributeType: %s (%d).\n", samba_ad[ i ].name, text, rc ); oc_sambaSamAccount = NULL; return rc; } } } #endif /* DO_SAMBA */ #ifdef DO_SHADOW if ( SMBK5PWD_DO_SHADOW( pi ) && oc_shadowAccount == NULL ) { int i, rc; oc_shadowAccount = oc_find( "shadowAccount" ); if ( !oc_shadowAccount ) { Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to find \"shadowAccount\" objectClass.\n", 0, 0, 0 ); return -1; } for ( i = 0; shadow_ad[ i ].name != NULL; i++ ) { const char *text; *(shadow_ad[ i ].adp) = NULL; rc = slap_str2ad( shadow_ad[ i ].name, shadow_ad[ i ].adp, &text ); if ( rc != LDAP_SUCCESS ) { Debug( LDAP_DEBUG_ANY, "smbk5pwd: " "unable to find \"%s\" attributeType: %s (%d).\n", shadow_ad[ i ].name, text, rc ); oc_shadowAccount = NULL; return rc; } } } #endif /* DO_SHADOW */ return 0; } static int smbk5pwd_db_init(BackendDB *be, ConfigReply *cr) { slap_overinst *on = (slap_overinst *)be->bd_info; smbk5pwd_t *pi; pi = ch_calloc( 1, sizeof( smbk5pwd_t ) ); if ( pi == NULL ) { return 1; } on->on_bi.bi_private = (void *)pi; return 0; } static int smbk5pwd_db_open(BackendDB *be, ConfigReply *cr) { slap_overinst *on = (slap_overinst *)be->bd_info; smbk5pwd_t *pi = (smbk5pwd_t *)on->on_bi.bi_private; int rc; if ( pi->mode == 0 ) { pi->mode = 
SMBK5PWD_F_ALL; } rc = smbk5pwd_modules_init( pi ); if ( rc ) { return rc; } return 0; } static int smbk5pwd_db_destroy(BackendDB *be, ConfigReply *cr) { slap_overinst *on = (slap_overinst *)be->bd_info; smbk5pwd_t *pi = (smbk5pwd_t *)on->on_bi.bi_private; if ( pi ) { ch_free( pi ); } return 0; } int smbk5pwd_initialize(void) { int rc; smbk5pwd.on_bi.bi_type = "smbk5pwd"; smbk5pwd.on_bi.bi_db_init = smbk5pwd_db_init; smbk5pwd.on_bi.bi_db_open = smbk5pwd_db_open; smbk5pwd.on_bi.bi_db_destroy = smbk5pwd_db_destroy; smbk5pwd.on_bi.bi_extended = smbk5pwd_exop_passwd; #ifdef DO_KRB5 smbk5pwd.on_bi.bi_op_bind = smbk5pwd_op_bind; lutil_passwd_add( (struct berval *)&k5key_scheme, k5key_chk, k5key_hash ); #endif smbk5pwd.on_bi.bi_cf_ocs = smbk5pwd_cfocs; rc = config_register_schema( smbk5pwd_cfats, smbk5pwd_cfocs ); if ( rc ) { return rc; } return overlay_register( &smbk5pwd ); } #if SLAPD_OVER_SMBK5PWD == SLAPD_MOD_DYNAMIC int init_module(int argc, char *argv[]) { return smbk5pwd_initialize(); } #endif #endif /* defined(SLAPD_OVER_SMBK5PWD) */
42414.c
#include <stdint.h> #include "spi_regs.h" void ets_printf(const char*, ...); void ets_delay_us(uint32_t us); #define TEST_ADDR (0x200000) // 0-63 bytes read works fine // 64 bytes read doesn't work // 64 bytes write works fine #define SIZE 64 void test_spi_read() { int i; uint32_t buf[16]; SPI(0).ADDR = TEST_ADDR | (SIZE << 24); SPI(0).CMD = SPI_CMD_READ; while (SPI(0).CMD) {}; for (i = 0; i < 16; i++) { buf[i] = SPI(0).W[i]; } for (i = 0; i < 16; i++) { ets_printf("Data[%d]=%x\n", i, buf[i]); } } void test_spi_write(uint8_t value) { int i; uint32_t buf[16]; for (i = 0; i < SIZE; i++) { ((uint8_t*)buf)[i] = value; } SPI(0).ADDR = TEST_ADDR | (SIZE << 24); for (i = 0; i < 16; i++) { SPI(0).W[i] = buf[i]; } SPI(0).CMD = SPI_CMD_WRITE_ENABLE; while (SPI(0).CMD) {} SPI(0).CMD = SPI_CMD_PP; while (SPI(0).CMD) {}; } void call_user_start() { uint8_t value = 0xAA; while (1) { ets_printf("Start writing\n"); test_spi_write(value); ets_printf("Writing done\n"); ets_printf("Start reading\n"); test_spi_read(); ets_printf("Reading done\n"); ets_delay_us(1000000); } }
535464.c
/* * Common prep/chrp pci routines. -- Cort */ #include <linux/config.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/bootmem.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/sections.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <asm/machdep.h> #undef DEBUG #ifdef DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) #endif unsigned long isa_io_base = 0; unsigned long isa_mem_base = 0; unsigned long pci_dram_offset = 0; int pcibios_assign_bus_offset = 1; void pcibios_make_OF_bus_map(void); static int pci_relocate_bridge_resource(struct pci_bus *bus, int i); static int probe_resource(struct pci_bus *parent, struct resource *pr, struct resource *res, struct resource **conflict); static void update_bridge_base(struct pci_bus *bus, int i); static void pcibios_fixup_resources(struct pci_dev* dev); static void fixup_broken_pcnet32(struct pci_dev* dev); static int reparent_resources(struct resource *parent, struct resource *res); static void fixup_cpc710_pci64(struct pci_dev* dev); #ifdef CONFIG_PPC_OF static u8* pci_to_OF_bus_map; #endif /* By default, we don't re-assign bus numbers. 
*/ int pci_assign_all_buses; struct pci_controller* hose_head; struct pci_controller** hose_tail = &hose_head; static int pci_bus_count; static void fixup_broken_pcnet32(struct pci_dev* dev) { if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { dev->vendor = PCI_VENDOR_ID_AMD; pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); static void fixup_cpc710_pci64(struct pci_dev* dev) { /* Hide the PCI64 BARs from the kernel as their content doesn't * fit well in the resource management */ dev->resource[0].start = dev->resource[0].end = 0; dev->resource[0].flags = 0; dev->resource[1].start = dev->resource[1].end = 0; dev->resource[1].flags = 0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64); static void pcibios_fixup_resources(struct pci_dev *dev) { struct pci_controller* hose = (struct pci_controller *)dev->sysdata; int i; unsigned long offset; if (!hose) { printk(KERN_ERR "No hose for PCI dev %s!\n", pci_name(dev)); return; } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = dev->resource + i; if (!res->flags) continue; if (res->end == 0xffffffff) { DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n", pci_name(dev), i, res->start, res->end); res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; continue; } offset = 0; if (res->flags & IORESOURCE_MEM) { offset = hose->pci_mem_offset; } else if (res->flags & IORESOURCE_IO) { offset = (unsigned long) hose->io_base_virt - isa_io_base; } if (offset != 0) { res->start += offset; res->end += offset; #ifdef DEBUG printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n", i, res->flags, pci_name(dev), res->start - offset, res->start); #endif } } /* Call machine specific resource fixup */ if (ppc_md.pcibios_fixup_resources) ppc_md.pcibios_fixup_resources(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); void 
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, struct resource *res) { unsigned long offset = 0; struct pci_controller *hose = dev->sysdata; if (hose && res->flags & IORESOURCE_IO) offset = (unsigned long)hose->io_base_virt - isa_io_base; else if (hose && res->flags & IORESOURCE_MEM) offset = hose->pci_mem_offset; region->start = res->start - offset; region->end = res->end - offset; } EXPORT_SYMBOL(pcibios_resource_to_bus); void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region) { unsigned long offset = 0; struct pci_controller *hose = dev->sysdata; if (hose && res->flags & IORESOURCE_IO) offset = (unsigned long)hose->io_base_virt - isa_io_base; else if (hose && res->flags & IORESOURCE_MEM) offset = hose->pci_mem_offset; res->start = region->start + offset; res->end = region->end + offset; } EXPORT_SYMBOL(pcibios_bus_to_resource); /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. */ void pcibios_align_resource(void *data, struct resource *res, unsigned long size, unsigned long align) { struct pci_dev *dev = data; if (res->flags & IORESOURCE_IO) { unsigned long start = res->start; if (size > 0x100) { printk(KERN_ERR "PCI: I/O Region %s/%d too large" " (%ld bytes)\n", pci_name(dev), dev->resource - res, size); } if (start & 0x300) { start = (start + 0x3ff) & ~0x3ff; res->start = start; } } } EXPORT_SYMBOL(pcibios_align_resource); /* * Handle resources of PCI devices. 
If the world were perfect, we could * just allocate all the resource regions and do nothing more. It isn't. * On the other hand, we cannot just re-allocate all devices, as it would * require us to know lots of host bridge internals. So we attempt to * keep as much of the original configuration as possible, but tweak it * when it's found to be wrong. * * Known BIOS problems we have to work around: * - I/O or memory regions not configured * - regions configured, but not enabled in the command register * - bogus I/O addresses above 64K used * - expansion ROMs left enabled (this may sound harmless, but given * the fact the PCI specs explicitly allow address decoders to be * shared between expansion ROMs and other resource regions, it's * at least dangerous) * * Our solution: * (1) Allocate resources for all buses behind PCI-to-PCI bridges. * This gives us fixed barriers on where we can allocate. * (2) Allocate resources for all enabled devices. If there is * a collision, just mark the resource as unallocated. Also * disable expansion ROMs during this step. * (3) Try to allocate resources for disabled devices. If the * resources were assigned correctly, everything goes well, * if they weren't, they won't disturb allocation of other * resources. * (4) Assign new addresses to resources which were either * not configured at all or misconfigured. If explicitly * requested by the user, configure expansion ROM address * as well. */ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) { struct pci_bus *bus; int i; struct resource *res, *pr; /* Depth-First Search on bus tree */ list_for_each_entry(bus, bus_list, node) { for (i = 0; i < 4; ++i) { if ((res = bus->resource[i]) == NULL || !res->flags || res->start > res->end) continue; if (bus->parent == NULL) pr = (res->flags & IORESOURCE_IO)? 
&ioport_resource: &iomem_resource; else { pr = pci_find_parent_resource(bus->self, res); if (pr == res) { /* this happens when the generic PCI * code (wrongly) decides that this * bridge is transparent -- paulus */ continue; } } DBG("PCI: bridge rsrc %lx..%lx (%lx), parent %p\n", res->start, res->end, res->flags, pr); if (pr) { if (request_resource(pr, res) == 0) continue; /* * Must be a conflict with an existing entry. * Move that entry (or entries) under the * bridge resource and try again. */ if (reparent_resources(pr, res) == 0) continue; } printk(KERN_ERR "PCI: Cannot allocate resource region " "%d of PCI bridge %d\n", i, bus->number); if (pci_relocate_bridge_resource(bus, i)) bus->resource[i] = NULL; } pcibios_allocate_bus_resources(&bus->children); } } /* * Reparent resource children of pr that conflict with res * under res, and make res replace those children. */ static int __init reparent_resources(struct resource *parent, struct resource *res) { struct resource *p, **pp; struct resource **firstpp = NULL; for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { if (p->end < res->start) continue; if (res->end < p->start) break; if (p->start < res->start || p->end > res->end) return -1; /* not completely contained */ if (firstpp == NULL) firstpp = pp; } if (firstpp == NULL) return -1; /* didn't find any conflicting entries? */ res->parent = parent; res->child = *firstpp; res->sibling = *pp; *firstpp = res; *pp = NULL; for (p = res->child; p != NULL; p = p->sibling) { p->parent = res; DBG(KERN_INFO "PCI: reparented %s [%lx..%lx] under %s\n", p->name, p->start, p->end, res->name); } return 0; } /* * A bridge has been allocated a range which is outside the range * of its parent bridge, so it needs to be moved. 
*/ static int __init pci_relocate_bridge_resource(struct pci_bus *bus, int i) { struct resource *res, *pr, *conflict; unsigned long try, size; int j; struct pci_bus *parent = bus->parent; if (parent == NULL) { /* shouldn't ever happen */ printk(KERN_ERR "PCI: can't move host bridge resource\n"); return -1; } res = bus->resource[i]; if (res == NULL) return -1; pr = NULL; for (j = 0; j < 4; j++) { struct resource *r = parent->resource[j]; if (!r) continue; if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM)) continue; if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) { pr = r; break; } if (res->flags & IORESOURCE_PREFETCH) pr = r; } if (pr == NULL) return -1; size = res->end - res->start; if (pr->start > pr->end || size > pr->end - pr->start) return -1; try = pr->end; for (;;) { res->start = try - size; res->end = try; if (probe_resource(bus->parent, pr, res, &conflict) == 0) break; if (conflict->start <= pr->start + size) return -1; try = conflict->start - 1; } if (request_resource(pr, res)) { DBG(KERN_ERR "PCI: huh? 
couldn't move to %lx..%lx\n", res->start, res->end); return -1; /* "can't happen" */ } update_bridge_base(bus, i); printk(KERN_INFO "PCI: bridge %d resource %d moved to %lx..%lx\n", bus->number, i, res->start, res->end); return 0; } static int __init probe_resource(struct pci_bus *parent, struct resource *pr, struct resource *res, struct resource **conflict) { struct pci_bus *bus; struct pci_dev *dev; struct resource *r; int i; for (r = pr->child; r != NULL; r = r->sibling) { if (r->end >= res->start && res->end >= r->start) { *conflict = r; return 1; } } list_for_each_entry(bus, &parent->children, node) { for (i = 0; i < 4; ++i) { if ((r = bus->resource[i]) == NULL) continue; if (!r->flags || r->start > r->end || r == res) continue; if (pci_find_parent_resource(bus->self, r) != pr) continue; if (r->end >= res->start && res->end >= r->start) { *conflict = r; return 1; } } } list_for_each_entry(dev, &parent->devices, bus_list) { for (i = 0; i < 6; ++i) { r = &dev->resource[i]; if (!r->flags || (r->flags & IORESOURCE_UNSET)) continue; if (pci_find_parent_resource(dev, r) != pr) continue; if (r->end >= res->start && res->end >= r->start) { *conflict = r; return 1; } } } return 0; } static void __init update_bridge_base(struct pci_bus *bus, int i) { struct resource *res = bus->resource[i]; u8 io_base_lo, io_limit_lo; u16 mem_base, mem_limit; u16 cmd; unsigned long start, end, off; struct pci_dev *dev = bus->self; struct pci_controller *hose = dev->sysdata; if (!hose) { printk("update_bridge_base: no hose?\n"); return; } pci_read_config_word(dev, PCI_COMMAND, &cmd); pci_write_config_word(dev, PCI_COMMAND, cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)); if (res->flags & IORESOURCE_IO) { off = (unsigned long) hose->io_base_virt - isa_io_base; start = res->start - off; end = res->end - off; io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK; io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK; if (end > 0xffff) { pci_write_config_word(dev, PCI_IO_BASE_UPPER16, start >> 16); 
pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16, end >> 16); io_base_lo |= PCI_IO_RANGE_TYPE_32; } else io_base_lo |= PCI_IO_RANGE_TYPE_16; pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo); pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo); } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) == IORESOURCE_MEM) { off = hose->pci_mem_offset; mem_base = ((res->start - off) >> 16) & PCI_MEMORY_RANGE_MASK; mem_limit = ((res->end - off) >> 16) & PCI_MEMORY_RANGE_MASK; pci_write_config_word(dev, PCI_MEMORY_BASE, mem_base); pci_write_config_word(dev, PCI_MEMORY_LIMIT, mem_limit); } else if ((res->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) == (IORESOURCE_MEM | IORESOURCE_PREFETCH)) { off = hose->pci_mem_offset; mem_base = ((res->start - off) >> 16) & PCI_PREF_RANGE_MASK; mem_limit = ((res->end - off) >> 16) & PCI_PREF_RANGE_MASK; pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, mem_base); pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, mem_limit); } else { DBG(KERN_ERR "PCI: ugh, bridge %s res %d has flags=%lx\n", pci_name(dev), i, res->flags); } pci_write_config_word(dev, PCI_COMMAND, cmd); } static inline void alloc_resource(struct pci_dev *dev, int idx) { struct resource *pr, *r = &dev->resource[idx]; DBG("PCI:%s: Resource %d: %08lx-%08lx (f=%lx)\n", pci_name(dev), idx, r->start, r->end, r->flags); pr = pci_find_parent_resource(dev, r); if (!pr || request_resource(pr, r) < 0) { printk(KERN_ERR "PCI: Cannot allocate resource region %d" " of device %s\n", idx, pci_name(dev)); if (pr) DBG("PCI: parent is %p: %08lx-%08lx (f=%lx)\n", pr, pr->start, pr->end, pr->flags); /* We'll assign a new address later */ r->flags |= IORESOURCE_UNSET; r->end -= r->start; r->start = 0; } } static void __init pcibios_allocate_resources(int pass) { struct pci_dev *dev = NULL; int idx, disabled; u16 command; struct resource *r; for_each_pci_dev(dev) { pci_read_config_word(dev, PCI_COMMAND, &command); for (idx = 0; idx < 6; idx++) { r = &dev->resource[idx]; if 
(r->parent) /* Already allocated */ continue; if (!r->flags || (r->flags & IORESOURCE_UNSET)) continue; /* Not assigned at all */ if (r->flags & IORESOURCE_IO) disabled = !(command & PCI_COMMAND_IO); else disabled = !(command & PCI_COMMAND_MEMORY); if (pass == disabled) alloc_resource(dev, idx); } if (pass) continue; r = &dev->resource[PCI_ROM_RESOURCE]; if (r->flags & IORESOURCE_ROM_ENABLE) { /* Turn the ROM off, leave the resource region, but keep it unregistered. */ u32 reg; DBG("PCI: Switching off ROM of %s\n", pci_name(dev)); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_read_config_dword(dev, dev->rom_base_reg, &reg); pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE); } } } static void __init pcibios_assign_resources(void) { struct pci_dev *dev = NULL; int idx; struct resource *r; for_each_pci_dev(dev) { int class = dev->class >> 8; /* Don't touch classless devices and host bridges */ if (!class || class == PCI_CLASS_BRIDGE_HOST) continue; for (idx = 0; idx < 6; idx++) { r = &dev->resource[idx]; /* * We shall assign a new address to this resource, * either because the BIOS (sic) forgot to do so * or because we have decided the old address was * unusable for some reason. 
*/ if ((r->flags & IORESOURCE_UNSET) && r->end && (!ppc_md.pcibios_enable_device_hook || !ppc_md.pcibios_enable_device_hook(dev, 1))) { r->flags &= ~IORESOURCE_UNSET; pci_assign_resource(dev, idx); } } #if 0 /* don't assign ROMs */ r = &dev->resource[PCI_ROM_RESOURCE]; r->end -= r->start; r->start = 0; if (r->end) pci_assign_resource(dev, PCI_ROM_RESOURCE); #endif } } int pcibios_enable_resources(struct pci_dev *dev, int mask) { u16 cmd, old_cmd; int idx; struct resource *r; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; for (idx=0; idx<6; idx++) { /* Only set up the requested stuff */ if (!(mask & (1<<idx))) continue; r = &dev->resource[idx]; if (r->flags & IORESOURCE_UNSET) { printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (dev->resource[PCI_ROM_RESOURCE].start) cmd |= PCI_COMMAND_MEMORY; if (cmd != old_cmd) { printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; } static int next_controller_index; struct pci_controller * __init pcibios_alloc_controller(void) { struct pci_controller *hose; hose = (struct pci_controller *)alloc_bootmem(sizeof(*hose)); memset(hose, 0, sizeof(struct pci_controller)); *hose_tail = hose; hose_tail = &hose->next; hose->index = next_controller_index++; return hose; } #ifdef CONFIG_PPC_OF /* * Functions below are used on OpenFirmware machines. 
*/ static void make_one_node_map(struct device_node* node, u8 pci_bus) { int *bus_range; int len; if (pci_bus >= pci_bus_count) return; bus_range = (int *) get_property(node, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, " "assuming it starts at 0\n", node->full_name); pci_to_OF_bus_map[pci_bus] = 0; } else pci_to_OF_bus_map[pci_bus] = bus_range[0]; for (node=node->child; node != 0;node = node->sibling) { struct pci_dev* dev; unsigned int *class_code, *reg; class_code = (unsigned int *) get_property(node, "class-code", NULL); if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) continue; reg = (unsigned int *)get_property(node, "reg", NULL); if (!reg) continue; dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff)); if (!dev || !dev->subordinate) continue; make_one_node_map(node, dev->subordinate->number); } } void pcibios_make_OF_bus_map(void) { int i; struct pci_controller* hose; u8* of_prop_map; pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL); if (!pci_to_OF_bus_map) { printk(KERN_ERR "Can't allocate OF bus map !\n"); return; } /* We fill the bus map with invalid values, that helps * debugging. 
*/ for (i=0; i<pci_bus_count; i++) pci_to_OF_bus_map[i] = 0xff; /* For each hose, we begin searching bridges */ for(hose=hose_head; hose; hose=hose->next) { struct device_node* node; node = (struct device_node *)hose->arch_data; if (!node) continue; make_one_node_map(node, hose->first_busno); } of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL); if (of_prop_map) memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count); #ifdef DEBUG printk("PCI->OF bus map:\n"); for (i=0; i<pci_bus_count; i++) { if (pci_to_OF_bus_map[i] == 0xff) continue; printk("%d -> %d\n", i, pci_to_OF_bus_map[i]); } #endif } typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); static struct device_node* scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data) { struct device_node* sub_node; for (; node != 0;node = node->sibling) { unsigned int *class_code; if (filter(node, data)) return node; /* For PCI<->PCI bridges or CardBus bridges, we go down * Note: some OFs create a parent node "multifunc-device" as * a fake root for all functions of a multi-function device, * we go down them as well. 
*/ class_code = (unsigned int *) get_property(node, "class-code", NULL); if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && strcmp(node->name, "multifunc-device")) continue; sub_node = scan_OF_pci_childs(node->child, filter, data); if (sub_node) return sub_node; } return NULL; } static int scan_OF_pci_childs_iterator(struct device_node* node, void* data) { unsigned int *reg; u8* fdata = (u8*)data; reg = (unsigned int *) get_property(node, "reg", NULL); if (reg && ((reg[0] >> 8) & 0xff) == fdata[1] && ((reg[0] >> 16) & 0xff) == fdata[0]) return 1; return 0; } static struct device_node* scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn) { u8 filter_data[2] = {bus, dev_fn}; return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data); } /* * Scans the OF tree for a device node matching a PCI device */ struct device_node * pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) { struct pci_controller *hose; struct device_node *node; int busnr; if (!have_of) return NULL; /* Lookup the hose */ busnr = bus->number; hose = pci_bus_to_hose(busnr); if (!hose) return NULL; /* Check it has an OF node associated */ node = (struct device_node *) hose->arch_data; if (!node) return NULL; /* Fixup bus number according to what OF think it is. */ if (pci_to_OF_bus_map) busnr = pci_to_OF_bus_map[busnr]; if (busnr == 0xff) return NULL; /* Now, lookup childs of the hose */ return scan_OF_childs_for_device(node->child, busnr, devfn); } EXPORT_SYMBOL(pci_busdev_to_OF_node); struct device_node* pci_device_to_OF_node(struct pci_dev *dev) { return pci_busdev_to_OF_node(dev->bus, dev->devfn); } EXPORT_SYMBOL(pci_device_to_OF_node); /* This routine is meant to be used early during boot, when the * PCI bus numbers have not yet been assigned, and you need to * issue PCI config cycles to an OF device. 
* It could also be used to "fix" RTAS config cycles if you want * to set pci_assign_all_buses to 1 and still use RTAS for PCI * config cycles. */ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) { if (!have_of) return NULL; while(node) { struct pci_controller* hose; for (hose=hose_head;hose;hose=hose->next) if (hose->arch_data == node) return hose; node=node->parent; } return NULL; } static int find_OF_pci_device_filter(struct device_node* node, void* data) { return ((void *)node == data); } /* * Returns the PCI device matching a given OF node */ int pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) { unsigned int *reg; struct pci_controller* hose; struct pci_dev* dev = NULL; if (!have_of) return -ENODEV; /* Make sure it's really a PCI device */ hose = pci_find_hose_for_OF_device(node); if (!hose || !hose->arch_data) return -ENODEV; if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child, find_OF_pci_device_filter, (void *)node)) return -ENODEV; reg = (unsigned int *) get_property(node, "reg", NULL); if (!reg) return -ENODEV; *bus = (reg[0] >> 16) & 0xff; *devfn = ((reg[0] >> 8) & 0xff); /* Ok, here we need some tweak. If we have already renumbered * all busses, we can't rely on the OF bus number any more. * the pci_to_OF_bus_map is not enough as several PCI busses * may match the same OF bus number. 
*/ if (!pci_to_OF_bus_map) return 0; for_each_pci_dev(dev) if (pci_to_OF_bus_map[dev->bus->number] == *bus && dev->devfn == *devfn) { *bus = dev->bus->number; pci_dev_put(dev); return 0; } return -ENODEV; } EXPORT_SYMBOL(pci_device_from_OF_node); void __init pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int primary) { static unsigned int static_lc_ranges[256] __initdata; unsigned int *dt_ranges, *lc_ranges, *ranges, *prev; unsigned int size; int rlen = 0, orig_rlen; int memno = 0; struct resource *res; int np, na = prom_n_addr_cells(dev); np = na + 5; /* First we try to merge ranges to fix a problem with some pmacs * that can have more than 3 ranges, fortunately using contiguous * addresses -- BenH */ dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen); if (!dt_ranges) return; /* Sanity check, though hopefully that never happens */ if (rlen > sizeof(static_lc_ranges)) { printk(KERN_WARNING "OF ranges property too large !\n"); rlen = sizeof(static_lc_ranges); } lc_ranges = static_lc_ranges; memcpy(lc_ranges, dt_ranges, rlen); orig_rlen = rlen; /* Let's work on a copy of the "ranges" property instead of damaging * the device-tree image in memory */ ranges = lc_ranges; prev = NULL; while ((rlen -= np * sizeof(unsigned int)) >= 0) { if (prev) { if (prev[0] == ranges[0] && prev[1] == ranges[1] && (prev[2] + prev[na+4]) == ranges[2] && (prev[na+2] + prev[na+4]) == ranges[na+2]) { prev[na+4] += ranges[na+4]; ranges[0] = 0; ranges += np; continue; } } prev = ranges; ranges += np; } /* * The ranges property is laid out as an array of elements, * each of which comprises: * cells 0 - 2: a PCI address * cells 3 or 3+4: a CPU physical address * (size depending on dev->n_addr_cells) * cells 4+5 or 5+6: the size of the range */ ranges = lc_ranges; rlen = orig_rlen; while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) { res = NULL; size = ranges[na+4]; switch ((ranges[0] >> 24) & 0x3) { case 1: /* I/O space */ if (ranges[2] != 
0) break; hose->io_base_phys = ranges[na+2]; /* limit I/O space to 16MB */ if (size > 0x01000000) size = 0x01000000; hose->io_base_virt = ioremap(ranges[na+2], size); if (primary) isa_io_base = (unsigned long) hose->io_base_virt; res = &hose->io_resource; res->flags = IORESOURCE_IO; res->start = ranges[2]; DBG("PCI: IO 0x%lx -> 0x%lx\n", res->start, res->start + size - 1); break; case 2: /* memory space */ memno = 0; if (ranges[1] == 0 && ranges[2] == 0 && ranges[na+4] <= (16 << 20)) { /* 1st 16MB, i.e. ISA memory area */ if (primary) isa_mem_base = ranges[na+2]; memno = 1; } while (memno < 3 && hose->mem_resources[memno].flags) ++memno; if (memno == 0) hose->pci_mem_offset = ranges[na+2] - ranges[2]; if (memno < 3) { res = &hose->mem_resources[memno]; res->flags = IORESOURCE_MEM; if(ranges[0] & 0x40000000) res->flags |= IORESOURCE_PREFETCH; res->start = ranges[na+2]; DBG("PCI: MEM[%d] 0x%lx -> 0x%lx\n", memno, res->start, res->start + size - 1); } break; } if (res != NULL) { res->name = dev->full_name; res->end = res->start + size - 1; res->parent = NULL; res->sibling = NULL; res->child = NULL; } ranges += np; } } /* We create the "pci-OF-bus-map" property now so it appears in the * /proc device tree */ void __init pci_create_OF_bus_map(void) { struct property* of_prop; of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256); if (of_prop && find_path_device("/")) { memset(of_prop, -1, sizeof(struct property) + 256); of_prop->name = "pci-OF-bus-map"; of_prop->length = 256; of_prop->value = (unsigned char *)&of_prop[1]; prom_add_property(find_path_device("/"), of_prop); } } static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev; struct device_node *np; pdev = to_pci_dev (dev); np = pci_device_to_OF_node(pdev); if (np == NULL || np->full_name == NULL) return 0; return sprintf(buf, "%s", np->full_name); } static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); #else /* CONFIG_PPC_OF 
*/ void pcibios_make_OF_bus_map(void) { } #endif /* CONFIG_PPC_OF */ /* Add sysfs properties */ void pcibios_add_platform_entries(struct pci_dev *pdev) { #ifdef CONFIG_PPC_OF device_create_file(&pdev->dev, &dev_attr_devspec); #endif /* CONFIG_PPC_OF */ } static int __init pcibios_init(void) { struct pci_controller *hose; struct pci_bus *bus; int next_busno; printk(KERN_INFO "PCI: Probing PCI hardware\n"); /* Scan all of the recorded PCI controllers. */ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { if (pci_assign_all_buses) hose->first_busno = next_busno; hose->last_busno = 0xff; bus = pci_scan_bus(hose->first_busno, hose->ops, hose); hose->last_busno = bus->subordinate; if (pci_assign_all_buses || next_busno <= hose->last_busno) next_busno = hose->last_busno + pcibios_assign_bus_offset; } pci_bus_count = next_busno; /* OpenFirmware based machines need a map of OF bus * numbers vs. kernel bus numbers since we may have to * remap them. */ if (pci_assign_all_buses && have_of) pcibios_make_OF_bus_map(); /* Do machine dependent PCI interrupt routing */ if (ppc_md.pci_swizzle && ppc_md.pci_map_irq) pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq); /* Call machine dependent fixup */ if (ppc_md.pcibios_fixup) ppc_md.pcibios_fixup(); /* Allocate and assign resources */ pcibios_allocate_bus_resources(&pci_root_buses); pcibios_allocate_resources(0); pcibios_allocate_resources(1); pcibios_assign_resources(); /* Call machine dependent post-init code */ if (ppc_md.pcibios_after_init) ppc_md.pcibios_after_init(); return 0; } subsys_initcall(pcibios_init); unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp) { struct pci_controller *hose = dev->sysdata; if (dev->bus->number != hose->first_busno) { u8 pin = *pinp; do { pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); /* Move up the chain of bridges. */ dev = dev->bus->self; } while (dev->bus->self); *pinp = pin; /* The slot is the idsel of the last bridge. 
*/ } return PCI_SLOT(dev->devfn); } unsigned long resource_fixup(struct pci_dev * dev, struct resource * res, unsigned long start, unsigned long size) { return start; } void __init pcibios_fixup_bus(struct pci_bus *bus) { struct pci_controller *hose = (struct pci_controller *) bus->sysdata; unsigned long io_offset; struct resource *res; int i; io_offset = (unsigned long)hose->io_base_virt - isa_io_base; if (bus->parent == NULL) { /* This is a host bridge - fill in its resources */ hose->bus = bus; bus->resource[0] = res = &hose->io_resource; if (!res->flags) { if (io_offset) printk(KERN_ERR "I/O resource not set for host" " bridge %d\n", hose->index); res->start = 0; res->end = IO_SPACE_LIMIT; res->flags = IORESOURCE_IO; } res->start += io_offset; res->end += io_offset; for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; if (!res->flags) { if (i > 0) continue; printk(KERN_ERR "Memory resource not set for " "host bridge %d\n", hose->index); res->start = hose->pci_mem_offset; res->end = ~0U; res->flags = IORESOURCE_MEM; } bus->resource[i+1] = res; } } else { /* This is a subordinate bridge */ pci_read_bridge_bases(bus); for (i = 0; i < 4; ++i) { if ((res = bus->resource[i]) == NULL) continue; if (!res->flags) continue; if (io_offset && (res->flags & IORESOURCE_IO)) { res->start += io_offset; res->end += io_offset; } else if (hose->pci_mem_offset && (res->flags & IORESOURCE_MEM)) { res->start += hose->pci_mem_offset; res->end += hose->pci_mem_offset; } } } if (ppc_md.pcibios_fixup_bus) ppc_md.pcibios_fixup_bus(bus); } char __init *pcibios_setup(char *str) { return str; } /* the next one is stolen from the alpha port... 
*/ void __init pcibios_update_irq(struct pci_dev *dev, int irq) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); /* XXX FIXME - update OF device tree node interrupt property */ } int pcibios_enable_device(struct pci_dev *dev, int mask) { u16 cmd, old_cmd; int idx; struct resource *r; if (ppc_md.pcibios_enable_device_hook) if (ppc_md.pcibios_enable_device_hook(dev, 0)) return -EINVAL; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; for (idx=0; idx<6; idx++) { r = &dev->resource[idx]; if (r->flags & IORESOURCE_UNSET) { printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; } struct pci_controller* pci_bus_to_hose(int bus) { struct pci_controller* hose = hose_head; for (; hose; hose = hose->next) if (bus >= hose->first_busno && bus <= hose->last_busno) return hose; return NULL; } void __iomem * pci_bus_io_base(unsigned int bus) { struct pci_controller *hose; hose = pci_bus_to_hose(bus); if (!hose) return NULL; return hose->io_base_virt; } unsigned long pci_bus_io_base_phys(unsigned int bus) { struct pci_controller *hose; hose = pci_bus_to_hose(bus); if (!hose) return 0; return hose->io_base_phys; } unsigned long pci_bus_mem_base_phys(unsigned int bus) { struct pci_controller *hose; hose = pci_bus_to_hose(bus); if (!hose) return 0; return hose->pci_mem_offset; } unsigned long pci_resource_to_bus(struct pci_dev *pdev, struct resource *res) { /* Hack alert again ! See comments in chrp_pci.c */ struct pci_controller* hose = (struct pci_controller *)pdev->sysdata; if (hose && res->flags & IORESOURCE_MEM) return res->start - hose->pci_mem_offset; /* We may want to do something with IOs here... 
*/ return res->start; } static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, unsigned long *offset, enum pci_mmap_state mmap_state) { struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); unsigned long io_offset = 0; int i, res_bit; if (hose == 0) return NULL; /* should never happen */ /* If memory, add on the PCI bridge address offset */ if (mmap_state == pci_mmap_mem) { *offset += hose->pci_mem_offset; res_bit = IORESOURCE_MEM; } else { io_offset = hose->io_base_virt - ___IO_BASE; *offset += io_offset; res_bit = IORESOURCE_IO; } /* * Check that the offset requested corresponds to one of the * resources of the device. */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &dev->resource[i]; int flags = rp->flags; /* treat ROM as memory (should be already) */ if (i == PCI_ROM_RESOURCE) flags |= IORESOURCE_MEM; /* Active and same type? */ if ((flags & res_bit) == 0) continue; /* In the range of this resource? */ if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) continue; /* found it! construct the final physical address */ if (mmap_state == pci_mmap_io) *offset += hose->io_base_phys - io_offset; return rp; } return NULL; } /* * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci * device mapping. */ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, pgprot_t protection, enum pci_mmap_state mmap_state, int write_combine) { unsigned long prot = pgprot_val(protection); /* Write combine is always 0 on non-memory space mappings. On * memory space, if the user didn't pass 1, we check for a * "prefetchable" resource. 
This is a bit hackish, but we use * this to workaround the inability of /sysfs to provide a write * combine bit */ if (mmap_state != pci_mmap_mem) write_combine = 0; else if (write_combine == 0) { if (rp->flags & IORESOURCE_PREFETCH) write_combine = 1; } /* XXX would be nice to have a way to ask for write-through */ prot |= _PAGE_NO_CACHE; if (write_combine) prot &= ~_PAGE_GUARDED; else prot |= _PAGE_GUARDED; printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start, prot); return __pgprot(prot); } /* * This one is used by /dev/mem and fbdev who have no clue about the * PCI device, it tries to find the PCI device first and calls the * above routine */ pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t protection) { struct pci_dev *pdev = NULL; struct resource *found = NULL; unsigned long prot = pgprot_val(protection); unsigned long offset = pfn << PAGE_SHIFT; int i; if (page_is_ram(pfn)) return prot; prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; for_each_pci_dev(pdev) { for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &pdev->resource[i]; int flags = rp->flags; /* Active and same type? */ if ((flags & IORESOURCE_MEM) == 0) continue; /* In the range of this resource? */ if (offset < (rp->start & PAGE_MASK) || offset > rp->end) continue; found = rp; break; } if (found) break; } if (found) { if (found->flags & IORESOURCE_PREFETCH) prot &= ~_PAGE_GUARDED; pci_dev_put(pdev); } DBG("non-PCI map for %lx, prot: %lx\n", offset, prot); return __pgprot(prot); } /* * Perform the actual remap of the pages for a PCI device mapping, as * appropriate for this architecture. The region in the process to map * is described by vm_start and vm_end members of VMA, the base physical * address is found in vm_pgoff. * The pci device structure is provided so that architectures may make mapping * decisions on a per-device or per-bus basis. * * Returns a negative error code on failure, zero on success. 
*/ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; struct resource *rp; int ret; rp = __pci_mmap_make_offset(dev, &offset, mmap_state); if (rp == NULL) return -EINVAL; vma->vm_pgoff = offset >> PAGE_SHIFT; vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO; vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, vma->vm_page_prot, mmap_state, write_combine); ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); return ret; } /* Obsolete functions. Should be removed once the symbios driver * is fixed */ unsigned long phys_to_bus(unsigned long pa) { struct pci_controller *hose; int i; for (hose = hose_head; hose; hose = hose->next) { for (i = 0; i < 3; ++i) { if (pa >= hose->mem_resources[i].start && pa <= hose->mem_resources[i].end) { /* * XXX the hose->pci_mem_offset really * only applies to mem_resources[0]. * We need a way to store an offset for * the others. -- paulus */ if (i == 0) pa -= hose->pci_mem_offset; return pa; } } } /* hmmm, didn't find it */ return 0; } unsigned long pci_phys_to_bus(unsigned long pa, int busnr) { struct pci_controller* hose = pci_bus_to_hose(busnr); if (!hose) return pa; return pa - hose->pci_mem_offset; } unsigned long pci_bus_to_phys(unsigned int ba, int busnr) { struct pci_controller* hose = pci_bus_to_hose(busnr); if (!hose) return ba; return ba + hose->pci_mem_offset; } /* Provide information on locations of various I/O regions in physical * memory. Do this on a per-card basis so that we choose the right * root bridge. 
* Note that the returned IO or memory base is a physical address */ long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) { struct pci_controller* hose; long result = -EOPNOTSUPP; hose = pci_bus_to_hose(bus); if (!hose) return -ENODEV; switch (which) { case IOBASE_BRIDGE_NUMBER: return (long)hose->first_busno; case IOBASE_MEMORY: return (long)hose->pci_mem_offset; case IOBASE_IO: return (long)hose->io_base_phys; case IOBASE_ISA_IO: return (long)isa_io_base; case IOBASE_ISA_MEM: return (long)isa_mem_base; } return result; } void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, u64 *start, u64 *end) { struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); unsigned long offset = 0; if (hose == NULL) return; if (rsrc->flags & IORESOURCE_IO) offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys; *start = rsrc->start + offset; *end = rsrc->end + offset; } void __init pci_init_resource(struct resource *res, unsigned long start, unsigned long end, int flags, char *name) { res->start = start; res->end = end; res->flags = flags; res->name = name; res->parent = NULL; res->sibling = NULL; res->child = NULL; } void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) { unsigned long start = pci_resource_start(dev, bar); unsigned long len = pci_resource_len(dev, bar); unsigned long flags = pci_resource_flags(dev, bar); if (!len) return NULL; if (max && len > max) len = max; if (flags & IORESOURCE_IO) return ioport_map(start, len); if (flags & IORESOURCE_MEM) /* Not checking IORESOURCE_CACHEABLE because PPC does * not currently distinguish between ioremap and * ioremap_nocache. */ return ioremap(start, len); /* What? 
*/ return NULL; } void pci_iounmap(struct pci_dev *dev, void __iomem *addr) { /* Nothing to do */ } EXPORT_SYMBOL(pci_iomap); EXPORT_SYMBOL(pci_iounmap); unsigned long pci_address_to_pio(phys_addr_t address) { struct pci_controller* hose = hose_head; for (; hose; hose = hose->next) { unsigned int size = hose->io_resource.end - hose->io_resource.start + 1; if (address >= hose->io_base_phys && address < (hose->io_base_phys + size)) { unsigned long base = (unsigned long)hose->io_base_virt - _IO_BASE; return base + (address - hose->io_base_phys); } } return (unsigned int)-1; } EXPORT_SYMBOL(pci_address_to_pio); /* * Null PCI config access functions, for the case when we can't * find a hose. */ #define NULL_PCI_OP(rw, size, type) \ static int \ null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ { \ return PCIBIOS_DEVICE_NOT_FOUND; \ } static int null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { return PCIBIOS_DEVICE_NOT_FOUND; } static int null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { return PCIBIOS_DEVICE_NOT_FOUND; } static struct pci_ops null_pci_ops = { null_read_config, null_write_config }; /* * These functions are used early on before PCI scanning is done * and all of the pci_dev and pci_bus structures have been created. */ static struct pci_bus * fake_pci_bus(struct pci_controller *hose, int busnr) { static struct pci_bus bus; if (hose == 0) { hose = pci_bus_to_hose(busnr); if (hose == 0) printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); } bus.number = busnr; bus.sysdata = hose; bus.ops = hose? 
hose->ops: &null_pci_ops; return &bus; } #define EARLY_PCI_OP(rw, size, type) \ int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ int devfn, int offset, type value) \ { \ return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ devfn, offset, value); \ } EARLY_PCI_OP(read, byte, u8 *) EARLY_PCI_OP(read, word, u16 *) EARLY_PCI_OP(read, dword, u32 *) EARLY_PCI_OP(write, byte, u8) EARLY_PCI_OP(write, word, u16) EARLY_PCI_OP(write, dword, u32)
512322.c
#include "../../tracy.h"

/* This platform exposes exactly one ABI, so the abi argument is ignored
 * by every function below. */
int get_abi(struct tracy_event *s) {
    (void)s;
    return TRACY_ABI_NATIVE;
}

/* Read syscall-argument register number `reg` (0..5, in x86 syscall
 * argument order ebx..ebp) from the saved register set.
 * Returns -1 for an out-of-range index. */
long get_reg(struct TRACY_REGS_NAME *r, int reg, int abi) {
    (void)abi; /* single ABI */

    switch (reg) {
        case 0: return r->ebx;
        case 1: return r->ecx;
        case 2: return r->edx;
        case 3: return r->esi;
        case 4: return r->edi;
        case 5: return r->ebp;
        default: return -1;
    }
}

/* Store `val` into syscall-argument register number `reg` (0..5).
 * An out-of-range index is silently ignored; always returns 0, matching
 * the original contract. */
long set_reg(struct TRACY_REGS_NAME *r, int reg, int abi, long val) {
    (void)abi; /* single ABI */

    switch (reg) {
        case 0: r->ebx = val; break;
        case 1: r->ecx = val; break;
        case 2: r->edx = val; break;
        case 3: r->esi = val; break;
        case 4: r->edi = val; break;
        case 5: r->ebp = val; break;
        default: break;
    }
    return 0;
}
65352.c
/*
 * libc/string/strcoll.c
 */

#include <xboot/module.h>
#include <types.h>
#include <string.h>

/*
 * Locale-aware string comparison. No locale machinery exists in this
 * libc, so the comparison simply falls back to a byte-wise strcmp
 * (equivalent to the "C" locale).
 */
int strcoll(const char * s1, const char * s2)
{
	int cmp = strcmp(s1, s2);

	return cmp;
}
EXPORT_SYMBOL(strcoll);
277338.c
/*
 * @explain: Copyright (c) 2020 WEI.ZHOU. All rights reserved.
 * The following code is only used for learning and communication, not for
 * illegal and commercial use. If the code is used, no consent is required, but
 * the author has nothing to do with any problems and consequences.
 *
 * In case of code problems, feedback can be made through the following email
 * address. <xiaoandx@gmail.com>
 *
 * @Description: find the maximum and minimum values in the 2-D array a
 * @Author: WEI.ZHOU
 * @Date: 2020-11-14 08:56:42
 * @Version: V1.0
 */
#include <stdio.h>

#define N 3

int main() {
    int i, j;
    int a[N][N] = {{4, 4, 34}, {37, 3, 12}, {5, 6, 5}};
    /* Seed BOTH extremes from the first element. The original seeded
     * max with 0, which would report a wrong maximum for an array whose
     * elements are all negative. */
    int max = a[0][0], min = a[0][0];

    /* Single pass over every element, tightening both bounds. */
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            if (a[i][j] < min) {
                min = a[i][j];
            }
            if (a[i][j] > max) {
                max = a[i][j];
            }
        }
    }
    printf("The max is:%d \n", max);
    printf("The min is:%d \n", min);
    return 0;
}
842809.c
#include <stdlib.h>

/*
** Duplicate the NUL-terminated string src into a freshly malloc'ed
** buffer. Returns NULL when the allocation fails; otherwise the caller
** owns (and must eventually free) the returned copy.
*/
char *ft_strdup(char *src)
{
	int i;
	char *dest;

	i = 0;
	while (src[i])
	{
		i++;
	}
	if ((dest = (char *)malloc(sizeof(char) * (i + 1))) == NULL)
		return (NULL);
	i = 0;
	while (src[i])
	{
		dest[i] = src[i];
		i++;
	}
	/* BUG FIX: the original never wrote the terminator, leaving the
	** last byte of the buffer uninitialized (UB when the copy is read
	** back as a string). */
	dest[i] = '\0';
	return (dest);
}
// Ne pas rendre la main - Tester //
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	char *src;
	char *copy;

	src = "coucou";
	copy = ft_strdup(src);
	/* BUG FIX: the original passed a possibly-NULL pointer straight to
	** printf("%s") and leaked the allocation. */
	if (copy == NULL)
		return (1);
	printf("%s \n", copy);
	free(copy);
	return (0);
}
578433.c
/* * mISDNinfineon.c * Support for cards based on following Infineon ISDN chipsets * - ISAC + HSCX * - IPAC and IPAC-X * - ISAC-SX + HSCX * * Supported cards: * - Dialogic Diva 2.0 * - Dialogic Diva 2.0U * - Dialogic Diva 2.01 * - Dialogic Diva 2.02 * - Sedlbauer Speedwin * - HST Saphir3 * - Develo (former ELSA) Microlink PCI (Quickstep 1000) * - Develo (former ELSA) Quickstep 3000 * - Berkom Scitel BRIX Quadro * - Dr.Neuhaus (Sagem) Niccy * * * * Author Karsten Keil <keil@isdn4linux.de> * * Copyright 2009 by Karsten Keil <keil@isdn4linux.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/mISDNhw.h> #include <linux/slab.h> #include "ipac.h" #define INFINEON_REV "1.0" static int inf_cnt; static u32 debug; static u32 irqloops = 4; enum inf_types { INF_NONE, INF_DIVA20, INF_DIVA20U, INF_DIVA201, INF_DIVA202, INF_SPEEDWIN, INF_SAPHIR3, INF_QS1000, INF_QS3000, INF_NICCY, INF_SCT_1, INF_SCT_2, INF_SCT_3, INF_SCT_4, INF_GAZEL_R685, INF_GAZEL_R753 }; enum addr_mode { AM_NONE = 0, AM_IO, AM_MEMIO, AM_IND_IO, }; struct inf_cinfo { enum inf_types typ; const char *full; const char *name; enum addr_mode cfg_mode; enum addr_mode addr_mode; u8 cfg_bar; u8 addr_bar; void *irqfunc; }; struct _ioaddr { enum addr_mode mode; union { void __iomem *p; struct _ioport io; } a; }; struct _iohandle { enum addr_mode mode; resource_size_t size; resource_size_t start; void __iomem *p; }; struct inf_hw { struct list_head list; struct pci_dev *pdev; const struct inf_cinfo *ci; char name[MISDN_MAX_IDLEN]; u32 irq; u32 irqcnt; struct _iohandle cfg; struct _iohandle addr; struct _ioaddr isac; struct _ioaddr hscx; spinlock_t lock; /* HW access lock */ struct ipac_hw ipac; struct inf_hw *sc[3]; /* slave cards */ }; #define PCI_SUBVENDOR_HST_SAPHIR3 0x52 #define PCI_SUBVENDOR_SEDLBAUER_PCI 0x53 #define PCI_SUB_ID_SEDLBAUER 0x01 static struct pci_device_id infineon_ids[] __devinitdata = { { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA20}, { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA20_U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA20U}, { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA201}, { PCI_VENDOR_ID_EICON, PCI_DEVICE_ID_EICON_DIVA202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_DIVA202}, { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100, PCI_SUBVENDOR_SEDLBAUER_PCI, PCI_SUB_ID_SEDLBAUER, 0, 0, INF_SPEEDWIN}, { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_100, PCI_SUBVENDOR_HST_SAPHIR3, PCI_SUB_ID_SEDLBAUER, 0, 
0, INF_SAPHIR3}, { PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_MICROLINK, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_QS1000}, { PCI_VENDOR_ID_ELSA, PCI_DEVICE_ID_ELSA_QS3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_QS3000}, { PCI_VENDOR_ID_SATSAGEM, PCI_DEVICE_ID_SATSAGEM_NICCY, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_NICCY}, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO, 0, 0, INF_SCT_1}, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R685, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R685}, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_R753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R753}, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_DJINN_ITOO, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R753}, { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_OLITEC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, INF_GAZEL_R753}, { } }; MODULE_DEVICE_TABLE(pci, infineon_ids); /* PCI interface specific defines */ /* Diva 2.0/2.0U */ #define DIVA_HSCX_PORT 0x00 #define DIVA_HSCX_ALE 0x04 #define DIVA_ISAC_PORT 0x08 #define DIVA_ISAC_ALE 0x0C #define DIVA_PCI_CTRL 0x10 /* DIVA_PCI_CTRL bits */ #define DIVA_IRQ_BIT 0x01 #define DIVA_RESET_BIT 0x08 #define DIVA_EEPROM_CLK 0x40 #define DIVA_LED_A 0x10 #define DIVA_LED_B 0x20 #define DIVA_IRQ_CLR 0x80 /* Diva 2.01/2.02 */ /* Siemens PITA */ #define PITA_ICR_REG 0x00 #define PITA_INT0_STATUS 0x02 #define PITA_MISC_REG 0x1c #define PITA_PARA_SOFTRESET 0x01000000 #define PITA_SER_SOFTRESET 0x02000000 #define PITA_PARA_MPX_MODE 0x04000000 #define PITA_INT0_ENABLE 0x00020000 /* TIGER 100 Registers */ #define TIGER_RESET_ADDR 0x00 #define TIGER_EXTERN_RESET 0x01 #define TIGER_AUX_CTRL 0x02 #define TIGER_AUX_DATA 0x03 #define TIGER_AUX_IRQMASK 0x05 #define TIGER_AUX_STATUS 0x07 /* Tiger AUX BITs */ #define TIGER_IOMASK 0xdd /* 1 and 5 are inputs */ #define TIGER_IRQ_BIT 0x02 #define TIGER_IPAC_ALE 0xC0 #define TIGER_IPAC_PORT 0xC8 /* ELSA (now Develo) PCI cards */ #define ELSA_IRQ_ADDR 0x4c #define ELSA_IRQ_MASK 0x04 #define QS1000_IRQ_OFF 0x01 #define QS3000_IRQ_OFF 
0x03 #define QS1000_IRQ_ON 0x41 #define QS3000_IRQ_ON 0x43 /* Dr Neuhaus/Sagem Niccy */ #define NICCY_ISAC_PORT 0x00 #define NICCY_HSCX_PORT 0x01 #define NICCY_ISAC_ALE 0x02 #define NICCY_HSCX_ALE 0x03 #define NICCY_IRQ_CTRL_REG 0x38 #define NICCY_IRQ_ENABLE 0x001f00 #define NICCY_IRQ_DISABLE 0xff0000 #define NICCY_IRQ_BIT 0x800000 /* Scitel PLX */ #define SCT_PLX_IRQ_ADDR 0x4c #define SCT_PLX_RESET_ADDR 0x50 #define SCT_PLX_IRQ_ENABLE 0x41 #define SCT_PLX_RESET_BIT 0x04 /* Gazel */ #define GAZEL_IPAC_DATA_PORT 0x04 /* Gazel PLX */ #define GAZEL_CNTRL 0x50 #define GAZEL_RESET 0x04 #define GAZEL_RESET_9050 0x40000000 #define GAZEL_INCSR 0x4C #define GAZEL_ISAC_EN 0x08 #define GAZEL_INT_ISAC 0x20 #define GAZEL_HSCX_EN 0x01 #define GAZEL_INT_HSCX 0x04 #define GAZEL_PCI_EN 0x40 #define GAZEL_IPAC_EN 0x03 static LIST_HEAD(Cards); static DEFINE_RWLOCK(card_lock); /* protect Cards */ static void _set_debug(struct inf_hw *card) { card->ipac.isac.dch.debug = debug; card->ipac.hscx[0].bch.debug = debug; card->ipac.hscx[1].bch.debug = debug; } static int set_debug(const char *val, struct kernel_param *kp) { int ret; struct inf_hw *card; ret = param_set_uint(val, kp); if (!ret) { read_lock(&card_lock); list_for_each_entry(card, &Cards, list) _set_debug(card); read_unlock(&card_lock); } return ret; } MODULE_AUTHOR("Karsten Keil"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(INFINEON_REV); module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "infineon debug mask"); module_param(irqloops, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(irqloops, "infineon maximal irqloops (default 4)"); /* Interface functions */ IOFUNC_IO(ISAC, inf_hw, isac.a.io) IOFUNC_IO(IPAC, inf_hw, hscx.a.io) IOFUNC_IND(ISAC, inf_hw, isac.a.io) IOFUNC_IND(IPAC, inf_hw, hscx.a.io) IOFUNC_MEMIO(ISAC, inf_hw, u32, isac.a.p) IOFUNC_MEMIO(IPAC, inf_hw, u32, hscx.a.p) static irqreturn_t diva_irq(int intno, void *dev_id) { struct inf_hw *hw = dev_id; u8 val; 
spin_lock(&hw->lock); val = inb((u32)hw->cfg.start + DIVA_PCI_CTRL); if (!(val & DIVA_IRQ_BIT)) { /* for us or shared ? */ spin_unlock(&hw->lock); return IRQ_NONE; /* shared */ } hw->irqcnt++; mISDNipac_irq(&hw->ipac, irqloops); spin_unlock(&hw->lock); return IRQ_HANDLED; } static irqreturn_t diva20x_irq(int intno, void *dev_id) { struct inf_hw *hw = dev_id; u8 val; spin_lock(&hw->lock); val = readb(hw->cfg.p); if (!(val & PITA_INT0_STATUS)) { /* for us or shared ? */ spin_unlock(&hw->lock); return IRQ_NONE; /* shared */ } hw->irqcnt++; mISDNipac_irq(&hw->ipac, irqloops); writeb(PITA_INT0_STATUS, hw->cfg.p); /* ACK PITA INT0 */ spin_unlock(&hw->lock); return IRQ_HANDLED; } static irqreturn_t tiger_irq(int intno, void *dev_id) { struct inf_hw *hw = dev_id; u8 val; spin_lock(&hw->lock); val = inb((u32)hw->cfg.start + TIGER_AUX_STATUS); if (val & TIGER_IRQ_BIT) { /* for us or shared ? */ spin_unlock(&hw->lock); return IRQ_NONE; /* shared */ } hw->irqcnt++; mISDNipac_irq(&hw->ipac, irqloops); spin_unlock(&hw->lock); return IRQ_HANDLED; } static irqreturn_t elsa_irq(int intno, void *dev_id) { struct inf_hw *hw = dev_id; u8 val; spin_lock(&hw->lock); val = inb((u32)hw->cfg.start + ELSA_IRQ_ADDR); if (!(val & ELSA_IRQ_MASK)) { spin_unlock(&hw->lock); return IRQ_NONE; /* shared */ } hw->irqcnt++; mISDNipac_irq(&hw->ipac, irqloops); spin_unlock(&hw->lock); return IRQ_HANDLED; } static irqreturn_t niccy_irq(int intno, void *dev_id) { struct inf_hw *hw = dev_id; u32 val; spin_lock(&hw->lock); val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); if (!(val & NICCY_IRQ_BIT)) { /* for us or shared ? 
*/ spin_unlock(&hw->lock); return IRQ_NONE; /* shared */ } outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); hw->irqcnt++; mISDNipac_irq(&hw->ipac, irqloops); spin_unlock(&hw->lock); return IRQ_HANDLED; } static irqreturn_t gazel_irq(int intno, void *dev_id) { struct inf_hw *hw = dev_id; irqreturn_t ret; spin_lock(&hw->lock); ret = mISDNipac_irq(&hw->ipac, irqloops); spin_unlock(&hw->lock); return ret; } static irqreturn_t ipac_irq(int intno, void *dev_id) { struct inf_hw *hw = dev_id; u8 val; spin_lock(&hw->lock); val = hw->ipac.read_reg(hw, IPAC_ISTA); if (!(val & 0x3f)) { spin_unlock(&hw->lock); return IRQ_NONE; /* shared */ } hw->irqcnt++; mISDNipac_irq(&hw->ipac, irqloops); spin_unlock(&hw->lock); return IRQ_HANDLED; } static void enable_hwirq(struct inf_hw *hw) { u16 w; u32 val; switch (hw->ci->typ) { case INF_DIVA201: case INF_DIVA202: writel(PITA_INT0_ENABLE, hw->cfg.p); break; case INF_SPEEDWIN: case INF_SAPHIR3: outb(TIGER_IRQ_BIT, (u32)hw->cfg.start + TIGER_AUX_IRQMASK); break; case INF_QS1000: outb(QS1000_IRQ_ON, (u32)hw->cfg.start + ELSA_IRQ_ADDR); break; case INF_QS3000: outb(QS3000_IRQ_ON, (u32)hw->cfg.start + ELSA_IRQ_ADDR); break; case INF_NICCY: val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); val |= NICCY_IRQ_ENABLE;; outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); break; case INF_SCT_1: w = inw((u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); w |= SCT_PLX_IRQ_ENABLE; outw(w, (u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); break; case INF_GAZEL_R685: outb(GAZEL_ISAC_EN + GAZEL_HSCX_EN + GAZEL_PCI_EN, (u32)hw->cfg.start + GAZEL_INCSR); break; case INF_GAZEL_R753: outb(GAZEL_IPAC_EN + GAZEL_PCI_EN, (u32)hw->cfg.start + GAZEL_INCSR); break; default: break; } } static void disable_hwirq(struct inf_hw *hw) { u16 w; u32 val; switch (hw->ci->typ) { case INF_DIVA201: case INF_DIVA202: writel(0, hw->cfg.p); break; case INF_SPEEDWIN: case INF_SAPHIR3: outb(0, (u32)hw->cfg.start + TIGER_AUX_IRQMASK); break; case INF_QS1000: outb(QS1000_IRQ_OFF, (u32)hw->cfg.start + 
ELSA_IRQ_ADDR); break; case INF_QS3000: outb(QS3000_IRQ_OFF, (u32)hw->cfg.start + ELSA_IRQ_ADDR); break; case INF_NICCY: val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); val &= NICCY_IRQ_DISABLE; outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG); break; case INF_SCT_1: w = inw((u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); w &= (~SCT_PLX_IRQ_ENABLE); outw(w, (u32)hw->cfg.start + SCT_PLX_IRQ_ADDR); break; case INF_GAZEL_R685: case INF_GAZEL_R753: outb(0, (u32)hw->cfg.start + GAZEL_INCSR); break; default: break; } } static void ipac_chip_reset(struct inf_hw *hw) { hw->ipac.write_reg(hw, IPAC_POTA2, 0x20); mdelay(5); hw->ipac.write_reg(hw, IPAC_POTA2, 0x00); mdelay(5); hw->ipac.write_reg(hw, IPAC_CONF, hw->ipac.conf); hw->ipac.write_reg(hw, IPAC_MASK, 0xc0); } static void reset_inf(struct inf_hw *hw) { u16 w; u32 val; if (debug & DEBUG_HW) pr_notice("%s: resetting card\n", hw->name); switch (hw->ci->typ) { case INF_DIVA20: case INF_DIVA20U: outb(0, (u32)hw->cfg.start + DIVA_PCI_CTRL); mdelay(10); outb(DIVA_RESET_BIT, (u32)hw->cfg.start + DIVA_PCI_CTRL); mdelay(10); /* Workaround PCI9060 */ outb(9, (u32)hw->cfg.start + 0x69); outb(DIVA_RESET_BIT | DIVA_LED_A, (u32)hw->cfg.start + DIVA_PCI_CTRL); break; case INF_DIVA201: writel(PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE, hw->cfg.p + PITA_MISC_REG); mdelay(1); writel(PITA_PARA_MPX_MODE, hw->cfg.p + PITA_MISC_REG); mdelay(10); break; case INF_DIVA202: writel(PITA_PARA_SOFTRESET | PITA_PARA_MPX_MODE, hw->cfg.p + PITA_MISC_REG); mdelay(1); writel(PITA_PARA_MPX_MODE | PITA_SER_SOFTRESET, hw->cfg.p + PITA_MISC_REG); mdelay(10); break; case INF_SPEEDWIN: case INF_SAPHIR3: ipac_chip_reset(hw); hw->ipac.write_reg(hw, IPAC_ACFG, 0xff); hw->ipac.write_reg(hw, IPAC_AOE, 0x00); hw->ipac.write_reg(hw, IPAC_PCFG, 0x12); break; case INF_QS1000: case INF_QS3000: ipac_chip_reset(hw); hw->ipac.write_reg(hw, IPAC_ACFG, 0x00); hw->ipac.write_reg(hw, IPAC_AOE, 0x3c); hw->ipac.write_reg(hw, IPAC_ATX, 0xff); break; case INF_NICCY: break; case 
INF_SCT_1: w = inw((u32)hw->cfg.start + SCT_PLX_RESET_ADDR); w &= (~SCT_PLX_RESET_BIT); outw(w, (u32)hw->cfg.start + SCT_PLX_RESET_ADDR); mdelay(10); w = inw((u32)hw->cfg.start + SCT_PLX_RESET_ADDR); w |= SCT_PLX_RESET_BIT; outw(w, (u32)hw->cfg.start + SCT_PLX_RESET_ADDR); mdelay(10); break; case INF_GAZEL_R685: val = inl((u32)hw->cfg.start + GAZEL_CNTRL); val |= (GAZEL_RESET_9050 + GAZEL_RESET); outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); val &= ~(GAZEL_RESET_9050 + GAZEL_RESET); mdelay(4); outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); mdelay(10); hw->ipac.isac.adf2 = 0x87; hw->ipac.hscx[0].slot = 0x1f; hw->ipac.hscx[0].slot = 0x23; break; case INF_GAZEL_R753: val = inl((u32)hw->cfg.start + GAZEL_CNTRL); val |= (GAZEL_RESET_9050 + GAZEL_RESET); outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); val &= ~(GAZEL_RESET_9050 + GAZEL_RESET); mdelay(4); outl(val, (u32)hw->cfg.start + GAZEL_CNTRL); mdelay(10); ipac_chip_reset(hw); hw->ipac.write_reg(hw, IPAC_ACFG, 0xff); hw->ipac.write_reg(hw, IPAC_AOE, 0x00); hw->ipac.conf = 0x01; /* IOM off */ break; default: return; } enable_hwirq(hw); } static int inf_ctrl(struct inf_hw *hw, u32 cmd, u_long arg) { int ret = 0; switch (cmd) { case HW_RESET_REQ: reset_inf(hw); break; default: pr_info("%s: %s unknown command %x %lx\n", hw->name, __func__, cmd, arg); ret = -EINVAL; break; } return ret; } static int __devinit init_irq(struct inf_hw *hw) { int ret, cnt = 3; u_long flags; if (!hw->ci->irqfunc) return -EINVAL; ret = request_irq(hw->irq, hw->ci->irqfunc, IRQF_SHARED, hw->name, hw); if (ret) { pr_info("%s: couldn't get interrupt %d\n", hw->name, hw->irq); return ret; } while (cnt--) { spin_lock_irqsave(&hw->lock, flags); reset_inf(hw); ret = hw->ipac.init(&hw->ipac); if (ret) { spin_unlock_irqrestore(&hw->lock, flags); pr_info("%s: ISAC init failed with %d\n", hw->name, ret); break; } spin_unlock_irqrestore(&hw->lock, flags); msleep_interruptible(10); if (debug & DEBUG_HW) pr_notice("%s: IRQ %d count %d\n", hw->name, hw->irq, 
hw->irqcnt); if (!hw->irqcnt) { pr_info("%s: IRQ(%d) got no requests during init %d\n", hw->name, hw->irq, 3 - cnt); } else return 0; } free_irq(hw->irq, hw); return -EIO; } static void release_io(struct inf_hw *hw) { if (hw->cfg.mode) { if (hw->cfg.p) { release_mem_region(hw->cfg.start, hw->cfg.size); iounmap(hw->cfg.p); } else release_region(hw->cfg.start, hw->cfg.size); hw->cfg.mode = AM_NONE; } if (hw->addr.mode) { if (hw->addr.p) { release_mem_region(hw->addr.start, hw->addr.size); iounmap(hw->addr.p); } else release_region(hw->addr.start, hw->addr.size); hw->addr.mode = AM_NONE; } } static int __devinit setup_io(struct inf_hw *hw) { int err = 0; if (hw->ci->cfg_mode) { hw->cfg.start = pci_resource_start(hw->pdev, hw->ci->cfg_bar); hw->cfg.size = pci_resource_len(hw->pdev, hw->ci->cfg_bar); if (hw->ci->cfg_mode == AM_MEMIO) { if (!request_mem_region(hw->cfg.start, hw->cfg.size, hw->name)) err = -EBUSY; } else { if (!request_region(hw->cfg.start, hw->cfg.size, hw->name)) err = -EBUSY; } if (err) { pr_info("mISDN: %s config port %lx (%lu bytes)" "already in use\n", hw->name, (ulong)hw->cfg.start, (ulong)hw->cfg.size); return err; } if (hw->ci->cfg_mode == AM_MEMIO) hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size); hw->cfg.mode = hw->ci->cfg_mode; if (debug & DEBUG_HW) pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n", hw->name, (ulong)hw->cfg.start, (ulong)hw->cfg.size, hw->ci->cfg_mode); } if (hw->ci->addr_mode) { hw->addr.start = pci_resource_start(hw->pdev, hw->ci->addr_bar); hw->addr.size = pci_resource_len(hw->pdev, hw->ci->addr_bar); if (hw->ci->addr_mode == AM_MEMIO) { if (!request_mem_region(hw->addr.start, hw->addr.size, hw->name)) err = -EBUSY; } else { if (!request_region(hw->addr.start, hw->addr.size, hw->name)) err = -EBUSY; } if (err) { pr_info("mISDN: %s address port %lx (%lu bytes)" "already in use\n", hw->name, (ulong)hw->addr.start, (ulong)hw->addr.size); return err; } if (hw->ci->addr_mode == AM_MEMIO) hw->addr.p = ioremap(hw->addr.start, 
hw->addr.size); hw->addr.mode = hw->ci->addr_mode; if (debug & DEBUG_HW) pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n", hw->name, (ulong)hw->addr.start, (ulong)hw->addr.size, hw->ci->addr_mode); } switch (hw->ci->typ) { case INF_DIVA20: case INF_DIVA20U: hw->ipac.type = IPAC_TYPE_ISAC | IPAC_TYPE_HSCX; hw->isac.mode = hw->cfg.mode; hw->isac.a.io.ale = (u32)hw->cfg.start + DIVA_ISAC_ALE; hw->isac.a.io.port = (u32)hw->cfg.start + DIVA_ISAC_PORT; hw->hscx.mode = hw->cfg.mode; hw->hscx.a.io.ale = (u32)hw->cfg.start + DIVA_HSCX_ALE; hw->hscx.a.io.port = (u32)hw->cfg.start + DIVA_HSCX_PORT; break; case INF_DIVA201: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.mode = hw->addr.mode; hw->isac.a.p = hw->addr.p; hw->hscx.mode = hw->addr.mode; hw->hscx.a.p = hw->addr.p; break; case INF_DIVA202: hw->ipac.type = IPAC_TYPE_IPACX; hw->isac.mode = hw->addr.mode; hw->isac.a.p = hw->addr.p; hw->hscx.mode = hw->addr.mode; hw->hscx.a.p = hw->addr.p; break; case INF_SPEEDWIN: case INF_SAPHIR3: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.mode = hw->cfg.mode; hw->isac.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE; hw->isac.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT; hw->hscx.mode = hw->cfg.mode; hw->hscx.a.io.ale = (u32)hw->cfg.start + TIGER_IPAC_ALE; hw->hscx.a.io.port = (u32)hw->cfg.start + TIGER_IPAC_PORT; outb(0xff, (ulong)hw->cfg.start); mdelay(1); outb(0x00, (ulong)hw->cfg.start); mdelay(1); outb(TIGER_IOMASK, (ulong)hw->cfg.start + TIGER_AUX_CTRL); break; case INF_QS1000: case INF_QS3000: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.a.io.ale = (u32)hw->addr.start; hw->isac.a.io.port = (u32)hw->addr.start + 1; hw->isac.mode = hw->addr.mode; hw->hscx.a.io.ale = (u32)hw->addr.start; hw->hscx.a.io.port = (u32)hw->addr.start + 1; hw->hscx.mode = hw->addr.mode; break; case INF_NICCY: hw->ipac.type = IPAC_TYPE_ISAC | IPAC_TYPE_HSCX; hw->isac.mode = hw->addr.mode; hw->isac.a.io.ale = (u32)hw->addr.start + 
NICCY_ISAC_ALE; hw->isac.a.io.port = (u32)hw->addr.start + NICCY_ISAC_PORT; hw->hscx.mode = hw->addr.mode; hw->hscx.a.io.ale = (u32)hw->addr.start + NICCY_HSCX_ALE; hw->hscx.a.io.port = (u32)hw->addr.start + NICCY_HSCX_PORT; break; case INF_SCT_1: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.a.io.ale = (u32)hw->addr.start; hw->isac.a.io.port = hw->isac.a.io.ale + 4; hw->isac.mode = hw->addr.mode; hw->hscx.a.io.ale = hw->isac.a.io.ale; hw->hscx.a.io.port = hw->isac.a.io.port; hw->hscx.mode = hw->addr.mode; break; case INF_SCT_2: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.a.io.ale = (u32)hw->addr.start + 0x08; hw->isac.a.io.port = hw->isac.a.io.ale + 4; hw->isac.mode = hw->addr.mode; hw->hscx.a.io.ale = hw->isac.a.io.ale; hw->hscx.a.io.port = hw->isac.a.io.port; hw->hscx.mode = hw->addr.mode; break; case INF_SCT_3: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.a.io.ale = (u32)hw->addr.start + 0x10; hw->isac.a.io.port = hw->isac.a.io.ale + 4; hw->isac.mode = hw->addr.mode; hw->hscx.a.io.ale = hw->isac.a.io.ale; hw->hscx.a.io.port = hw->isac.a.io.port; hw->hscx.mode = hw->addr.mode; break; case INF_SCT_4: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.a.io.ale = (u32)hw->addr.start + 0x20; hw->isac.a.io.port = hw->isac.a.io.ale + 4; hw->isac.mode = hw->addr.mode; hw->hscx.a.io.ale = hw->isac.a.io.ale; hw->hscx.a.io.port = hw->isac.a.io.port; hw->hscx.mode = hw->addr.mode; break; case INF_GAZEL_R685: hw->ipac.type = IPAC_TYPE_ISAC | IPAC_TYPE_HSCX; hw->ipac.isac.off = 0x80; hw->isac.mode = hw->addr.mode; hw->isac.a.io.port = (u32)hw->addr.start; hw->hscx.mode = hw->addr.mode; hw->hscx.a.io.port = hw->isac.a.io.port; break; case INF_GAZEL_R753: hw->ipac.type = IPAC_TYPE_IPAC; hw->ipac.isac.off = 0x80; hw->isac.mode = hw->addr.mode; hw->isac.a.io.ale = (u32)hw->addr.start; hw->isac.a.io.port = (u32)hw->addr.start + GAZEL_IPAC_DATA_PORT; hw->hscx.mode = hw->addr.mode; hw->hscx.a.io.ale = 
hw->isac.a.io.ale; hw->hscx.a.io.port = hw->isac.a.io.port; break; default: return -EINVAL; } switch (hw->isac.mode) { case AM_MEMIO: ASSIGN_FUNC_IPAC(MIO, hw->ipac); break; case AM_IND_IO: ASSIGN_FUNC_IPAC(IND, hw->ipac); break; case AM_IO: ASSIGN_FUNC_IPAC(IO, hw->ipac); break; default: return -EINVAL; } return 0; } static void release_card(struct inf_hw *card) { ulong flags; int i; spin_lock_irqsave(&card->lock, flags); disable_hwirq(card); spin_unlock_irqrestore(&card->lock, flags); card->ipac.isac.release(&card->ipac.isac); free_irq(card->irq, card); mISDN_unregister_device(&card->ipac.isac.dch.dev); release_io(card); write_lock_irqsave(&card_lock, flags); list_del(&card->list); write_unlock_irqrestore(&card_lock, flags); switch (card->ci->typ) { case INF_SCT_2: case INF_SCT_3: case INF_SCT_4: break; case INF_SCT_1: for (i = 0; i < 3; i++) { if (card->sc[i]) release_card(card->sc[i]); card->sc[i] = NULL; } default: pci_disable_device(card->pdev); pci_set_drvdata(card->pdev, NULL); break; } kfree(card); inf_cnt--; } static int __devinit setup_instance(struct inf_hw *card) { int err; ulong flags; snprintf(card->name, MISDN_MAX_IDLEN - 1, "%s.%d", card->ci->name, inf_cnt + 1); write_lock_irqsave(&card_lock, flags); list_add_tail(&card->list, &Cards); write_unlock_irqrestore(&card_lock, flags); _set_debug(card); card->ipac.isac.name = card->name; card->ipac.name = card->name; card->ipac.owner = THIS_MODULE; spin_lock_init(&card->lock); card->ipac.isac.hwlock = &card->lock; card->ipac.hwlock = &card->lock; card->ipac.ctrl = (void *)&inf_ctrl; err = setup_io(card); if (err) goto error_setup; card->ipac.isac.dch.dev.Bprotocols = mISDNipac_init(&card->ipac, card); if (card->ipac.isac.dch.dev.Bprotocols == 0) goto error_setup;; err = mISDN_register_device(&card->ipac.isac.dch.dev, &card->pdev->dev, card->name); if (err) goto error; err = init_irq(card); if (!err) { inf_cnt++; pr_notice("Infineon %d cards installed\n", inf_cnt); return 0; } 
mISDN_unregister_device(&card->ipac.isac.dch.dev); error: card->ipac.release(&card->ipac); error_setup: release_io(card); write_lock_irqsave(&card_lock, flags); list_del(&card->list); write_unlock_irqrestore(&card_lock, flags); return err; } static const struct inf_cinfo inf_card_info[] = { { INF_DIVA20, "Dialogic Diva 2.0", "diva20", AM_IND_IO, AM_NONE, 2, 0, &diva_irq }, { INF_DIVA20U, "Dialogic Diva 2.0U", "diva20U", AM_IND_IO, AM_NONE, 2, 0, &diva_irq }, { INF_DIVA201, "Dialogic Diva 2.01", "diva201", AM_MEMIO, AM_MEMIO, 0, 1, &diva20x_irq }, { INF_DIVA202, "Dialogic Diva 2.02", "diva202", AM_MEMIO, AM_MEMIO, 0, 1, &diva20x_irq }, { INF_SPEEDWIN, "Sedlbauer SpeedWin PCI", "speedwin", AM_IND_IO, AM_NONE, 0, 0, &tiger_irq }, { INF_SAPHIR3, "HST Saphir 3", "saphir", AM_IND_IO, AM_NONE, 0, 0, &tiger_irq }, { INF_QS1000, "Develo Microlink PCI", "qs1000", AM_IO, AM_IND_IO, 1, 3, &elsa_irq }, { INF_QS3000, "Develo QuickStep 3000", "qs3000", AM_IO, AM_IND_IO, 1, 3, &elsa_irq }, { INF_NICCY, "Sagem NICCY", "niccy", AM_IO, AM_IND_IO, 0, 1, &niccy_irq }, { INF_SCT_1, "SciTel Quadro", "p1_scitel", AM_IO, AM_IND_IO, 1, 5, &ipac_irq }, { INF_SCT_2, "SciTel Quadro", "p2_scitel", AM_NONE, AM_IND_IO, 0, 4, &ipac_irq }, { INF_SCT_3, "SciTel Quadro", "p3_scitel", AM_NONE, AM_IND_IO, 0, 3, &ipac_irq }, { INF_SCT_4, "SciTel Quadro", "p4_scitel", AM_NONE, AM_IND_IO, 0, 2, &ipac_irq }, { INF_GAZEL_R685, "Gazel R685", "gazel685", AM_IO, AM_IO, 1, 2, &gazel_irq }, { INF_GAZEL_R753, "Gazel R753", "gazel753", AM_IO, AM_IND_IO, 1, 2, &ipac_irq }, { INF_NONE, } }; static const struct inf_cinfo * __devinit get_card_info(enum inf_types typ) { const struct inf_cinfo *ci = inf_card_info; while (ci->typ != INF_NONE) { if (ci->typ == typ) return ci; ci++; } return NULL; } static int __devinit inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err = -ENOMEM; struct inf_hw *card; card = kzalloc(sizeof(struct inf_hw), GFP_KERNEL); if (!card) { pr_info("No memory for Infineon 
ISDN card\n"); return err; } card->pdev = pdev; err = pci_enable_device(pdev); if (err) { kfree(card); return err; } card->ci = get_card_info(ent->driver_data); if (!card->ci) { pr_info("mISDN: do not have informations about adapter at %s\n", pci_name(pdev)); kfree(card); return -EINVAL; } else pr_notice("mISDN: found adapter %s at %s\n", card->ci->full, pci_name(pdev)); card->irq = pdev->irq; pci_set_drvdata(pdev, card); err = setup_instance(card); if (err) { pci_disable_device(card->pdev); kfree(card); pci_set_drvdata(pdev, NULL); } else if (ent->driver_data == INF_SCT_1) { int i; struct inf_hw *sc; for (i = 1; i < 4; i++) { sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL); if (!sc) { release_card(card); return -ENOMEM; } sc->irq = card->irq; sc->pdev = card->pdev; sc->ci = card->ci + i; err = setup_instance(sc); if (err) { kfree(sc); release_card(card); break; } else card->sc[i - 1] = sc; } } return err; } static void __devexit inf_remove(struct pci_dev *pdev) { struct inf_hw *card = pci_get_drvdata(pdev); if (card) release_card(card); else pr_debug("%s: drvdata allready removed\n", __func__); } static struct pci_driver infineon_driver = { .name = "ISDN Infineon pci", .probe = inf_probe, .remove = __devexit_p(inf_remove), .id_table = infineon_ids, }; static int __init infineon_init(void) { int err; pr_notice("Infineon ISDN Driver Rev. %s\n", INFINEON_REV); err = pci_register_driver(&infineon_driver); return err; } static void __exit infineon_cleanup(void) { pci_unregister_driver(&infineon_driver); } module_init(infineon_init); module_exit(infineon_cleanup);
28305.c
/** ****************************************************************************** * @file stm32wbxx_ll_spi.c * @author MCD Application Team * @brief SPI LL module driver. ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2019 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ #if defined(USE_FULL_LL_DRIVER) /* Includes ------------------------------------------------------------------*/ #include "stm32wbxx_ll_spi.h" #include "stm32wbxx_ll_bus.h" #ifdef USE_FULL_ASSERT #include "stm32_assert.h" #else #define assert_param(expr) ((void)0U) #endif /** @addtogroup STM32WBxx_LL_Driver * @{ */ #if defined (SPI1) || defined (SPI2) /** @addtogroup SPI_LL * @{ */ /* Private types -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private constants ---------------------------------------------------------*/ /** @defgroup SPI_LL_Private_Constants SPI Private Constants * @{ */ /* SPI registers Masks */ #define SPI_CR1_CLEAR_MASK (SPI_CR1_CPHA | SPI_CR1_CPOL | SPI_CR1_MSTR | \ SPI_CR1_BR | SPI_CR1_LSBFIRST | SPI_CR1_SSI | \ SPI_CR1_SSM | SPI_CR1_RXONLY | SPI_CR1_CRCL | \ SPI_CR1_CRCNEXT | SPI_CR1_CRCEN | SPI_CR1_BIDIOE | \ SPI_CR1_BIDIMODE) /** * @} */ /* Private macros ------------------------------------------------------------*/ /** @defgroup SPI_LL_Private_Macros SPI Private Macros * @{ */ #define IS_LL_SPI_TRANSFER_DIRECTION(__VALUE__) (((__VALUE__) == LL_SPI_FULL_DUPLEX) \ || ((__VALUE__) == LL_SPI_SIMPLEX_RX) \ || ((__VALUE__) == LL_SPI_HALF_DUPLEX_RX) \ || ((__VALUE__) == 
LL_SPI_HALF_DUPLEX_TX)) #define IS_LL_SPI_MODE(__VALUE__) (((__VALUE__) == LL_SPI_MODE_MASTER) \ || ((__VALUE__) == LL_SPI_MODE_SLAVE)) #define IS_LL_SPI_DATAWIDTH(__VALUE__) (((__VALUE__) == LL_SPI_DATAWIDTH_4BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_5BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_6BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_7BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_8BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_9BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_10BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_11BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_12BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_13BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_14BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_15BIT) \ || ((__VALUE__) == LL_SPI_DATAWIDTH_16BIT)) #define IS_LL_SPI_POLARITY(__VALUE__) (((__VALUE__) == LL_SPI_POLARITY_LOW) \ || ((__VALUE__) == LL_SPI_POLARITY_HIGH)) #define IS_LL_SPI_PHASE(__VALUE__) (((__VALUE__) == LL_SPI_PHASE_1EDGE) \ || ((__VALUE__) == LL_SPI_PHASE_2EDGE)) #define IS_LL_SPI_NSS(__VALUE__) (((__VALUE__) == LL_SPI_NSS_SOFT) \ || ((__VALUE__) == LL_SPI_NSS_HARD_INPUT) \ || ((__VALUE__) == LL_SPI_NSS_HARD_OUTPUT)) #define IS_LL_SPI_BAUDRATE(__VALUE__) (((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV2) \ || ((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV4) \ || ((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV8) \ || ((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV16) \ || ((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV32) \ || ((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV64) \ || ((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV128) \ || ((__VALUE__) == LL_SPI_BAUDRATEPRESCALER_DIV256)) #define IS_LL_SPI_BITORDER(__VALUE__) (((__VALUE__) == LL_SPI_LSB_FIRST) \ || ((__VALUE__) == LL_SPI_MSB_FIRST)) #define IS_LL_SPI_CRCCALCULATION(__VALUE__) (((__VALUE__) == LL_SPI_CRCCALCULATION_ENABLE) \ || ((__VALUE__) == LL_SPI_CRCCALCULATION_DISABLE)) #define IS_LL_SPI_CRC_POLYNOMIAL(__VALUE__) ((__VALUE__) >= 0x1U) /** * @} */ /* Private function prototypes 
-----------------------------------------------*/

/* Exported functions --------------------------------------------------------*/
/** @addtogroup SPI_LL_Exported_Functions
  * @{
  */

/** @addtogroup SPI_LL_EF_Init
  * @{
  */

/**
  * @brief  De-initialize the SPI registers to their default reset values.
  * @note   Implemented by pulsing the peripheral reset line through the RCC
  *         (force + release), which also aborts any transfer in progress.
  * @param  SPIx SPI Instance
  * @retval An ErrorStatus enumeration value:
  *          - SUCCESS: SPI registers are de-initialized
  *          - ERROR: SPI registers are not de-initialized
  */
ErrorStatus LL_SPI_DeInit(SPI_TypeDef *SPIx)
{
  ErrorStatus status = ERROR;

  /* Check the parameters */
  assert_param(IS_SPI_ALL_INSTANCE(SPIx));

#if defined(SPI1)
  if (SPIx == SPI1)
  {
    /* Force reset of SPI clock */
    LL_APB2_GRP1_ForceReset(LL_APB2_GRP1_PERIPH_SPI1);

    /* Release reset of SPI clock */
    LL_APB2_GRP1_ReleaseReset(LL_APB2_GRP1_PERIPH_SPI1);

    status = SUCCESS;
  }
#endif /* SPI1 */
#if defined(SPI2)
  if (SPIx == SPI2)
  {
    /* Force reset of SPI clock; note SPI2 sits on APB1, not APB2 */
    LL_APB1_GRP1_ForceReset(LL_APB1_GRP1_PERIPH_SPI2);

    /* Release reset of SPI clock */
    LL_APB1_GRP1_ReleaseReset(LL_APB1_GRP1_PERIPH_SPI2);

    status = SUCCESS;
  }
#endif /* SPI2 */

  /* ERROR is returned if SPIx matched no instance compiled in */
  return status;
}

/**
  * @brief  Initialize the SPI registers according to the specified parameters in SPI_InitStruct.
  * @note   As some bits in SPI configuration registers can only be written when the SPI is disabled (SPI_CR1_SPE bit =0),
  *         SPI peripheral should be in disabled state prior calling this function. Otherwise, ERROR result will be returned.
  * @param  SPIx SPI Instance
  * @param  SPI_InitStruct pointer to a @ref LL_SPI_InitTypeDef structure
  * @retval An ErrorStatus enumeration value. (Return always SUCCESS)
  */
ErrorStatus LL_SPI_Init(SPI_TypeDef *SPIx, LL_SPI_InitTypeDef *SPI_InitStruct)
{
  ErrorStatus status = ERROR;

  /* Check the SPI Instance SPIx*/
  assert_param(IS_SPI_ALL_INSTANCE(SPIx));

  /* Check the SPI parameters from SPI_InitStruct*/
  assert_param(IS_LL_SPI_TRANSFER_DIRECTION(SPI_InitStruct->TransferDirection));
  assert_param(IS_LL_SPI_MODE(SPI_InitStruct->Mode));
  assert_param(IS_LL_SPI_DATAWIDTH(SPI_InitStruct->DataWidth));
  assert_param(IS_LL_SPI_POLARITY(SPI_InitStruct->ClockPolarity));
  assert_param(IS_LL_SPI_PHASE(SPI_InitStruct->ClockPhase));
  assert_param(IS_LL_SPI_NSS(SPI_InitStruct->NSS));
  assert_param(IS_LL_SPI_BAUDRATE(SPI_InitStruct->BaudRate));
  assert_param(IS_LL_SPI_BITORDER(SPI_InitStruct->BitOrder));
  assert_param(IS_LL_SPI_CRCCALCULATION(SPI_InitStruct->CRCCalculation));

  /* Registers are only written while the peripheral is disabled
   * (SPI_CR1_SPE == 0); otherwise ERROR is returned and nothing changes. */
  if (LL_SPI_IsEnabled(SPIx) == 0x00000000U)
  {
    /*---------------------------- SPIx CR1 Configuration ------------------------
     * Configure SPIx CR1 with parameters:
     * - TransferDirection:  SPI_CR1_BIDIMODE, SPI_CR1_BIDIOE and SPI_CR1_RXONLY bits
     * - Master/Slave Mode:  SPI_CR1_MSTR bit
     * - ClockPolarity:      SPI_CR1_CPOL bit
     * - ClockPhase:         SPI_CR1_CPHA bit
     * - NSS management:     SPI_CR1_SSM bit
     * - BaudRate prescaler: SPI_CR1_BR[2:0] bits
     * - BitOrder:           SPI_CR1_LSBFIRST bit
     * - CRCCalculation:     SPI_CR1_CRCEN bit
     */
    MODIFY_REG(SPIx->CR1,
               SPI_CR1_CLEAR_MASK,
               SPI_InitStruct->TransferDirection | SPI_InitStruct->Mode |
               SPI_InitStruct->ClockPolarity | SPI_InitStruct->ClockPhase |
               SPI_InitStruct->NSS | SPI_InitStruct->BaudRate |
               SPI_InitStruct->BitOrder | SPI_InitStruct->CRCCalculation);

    /*---------------------------- SPIx CR2 Configuration ------------------------
     * Configure SPIx CR2 with parameters:
     * - DataWidth:      DS[3:0] bits
     * - NSS management: SSOE bit (the SSOE setting is encoded in the upper
     *   half-word of the LL_SPI_NSS_* value, hence the >> 16U shift)
     */
    MODIFY_REG(SPIx->CR2,
               SPI_CR2_DS | SPI_CR2_SSOE,
               SPI_InitStruct->DataWidth | (SPI_InitStruct->NSS >> 16U));

    /*---------------------------- SPIx CRCPR Configuration ----------------------
     * Configure SPIx CRCPR with parameters:
     * - CRCPoly: CRCPOLY[15:0] bits
     * Only written when CRC calculation is enabled.
     */
    if (SPI_InitStruct->CRCCalculation == LL_SPI_CRCCALCULATION_ENABLE)
    {
      assert_param(IS_LL_SPI_CRC_POLYNOMIAL(SPI_InitStruct->CRCPoly));
      LL_SPI_SetCRCPolynomial(SPIx, SPI_InitStruct->CRCPoly);
    }

    status = SUCCESS;
  }

  return status;
}

/**
  * @brief  Set each @ref LL_SPI_InitTypeDef field to default value.
  * @note   Defaults describe a full-duplex, 8-bit, mode-0 slave with
  *         hardware NSS input and CRC disabled.
  * @param  SPI_InitStruct pointer to a @ref LL_SPI_InitTypeDef structure
  *         whose fields will be set to default values.
  * @retval None
  */
void LL_SPI_StructInit(LL_SPI_InitTypeDef *SPI_InitStruct)
{
  /* Set SPI_InitStruct fields to default values */
  SPI_InitStruct->TransferDirection = LL_SPI_FULL_DUPLEX;
  SPI_InitStruct->Mode              = LL_SPI_MODE_SLAVE;
  SPI_InitStruct->DataWidth         = LL_SPI_DATAWIDTH_8BIT;
  SPI_InitStruct->ClockPolarity     = LL_SPI_POLARITY_LOW;
  SPI_InitStruct->ClockPhase        = LL_SPI_PHASE_1EDGE;
  SPI_InitStruct->NSS               = LL_SPI_NSS_HARD_INPUT;
  SPI_InitStruct->BaudRate          = LL_SPI_BAUDRATEPRESCALER_DIV2;
  SPI_InitStruct->BitOrder          = LL_SPI_MSB_FIRST;
  SPI_InitStruct->CRCCalculation    = LL_SPI_CRCCALCULATION_DISABLE;
  SPI_InitStruct->CRCPoly           = 7U;
}

/**
  * @}
  */

/**
  * @}
  */

/**
  * @}
  */

#endif /* defined (SPI1) || defined (SPI2) */

/**
  * @}
  */

#endif /* USE_FULL_LL_DRIVER */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
884981.c
/****************************************************************************
 * fs/vfs/fs_open.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <stdbool.h>
#include <fcntl.h>
#include <sched.h>
#include <errno.h>
#include <assert.h>
#include <stdarg.h>

#include <nuttx/cancelpt.h>
#include <nuttx/fs/fs.h>

#include "inode/inode.h"
#include "driver/driver.h"

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: file_vopen
 *
 * Description:
 *   Core of the open path: look up 'path' in the inode tree, validate the
 *   inode against 'oflags', bind it into 'filep', and invoke the driver or
 *   mountpoint open method.  The optional 'mode_t mode' creation argument
 *   is pulled from 'ap' when needed.
 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value on failure.  On failure no
 *   inode reference is retained in 'filep'.
 *
 ****************************************************************************/

static int file_vopen(FAR struct file *filep, FAR const char *path,
                      int oflags, va_list ap)
{
  struct inode_search_s desc;
  FAR struct inode *inode;
#ifndef CONFIG_DISABLE_MOUNTPOINT
  mode_t mode = 0666;
#endif
  int ret;

  if (path == NULL)
    {
      return -EINVAL;
    }

#ifndef CONFIG_DISABLE_MOUNTPOINT
  /* If the file is opened for creation, then get the mode bits */

  if ((oflags & (O_WRONLY | O_CREAT)) != 0)
    {
      mode = va_arg(ap, mode_t);
    }

  /* Apply the task's umask unconditionally; with no explicit mode the
   * default 0666 is masked too.
   */

  mode &= ~getumask();
#endif

  /* Get an inode for this file */

  SETUP_SEARCH(&desc, path, false);

  ret = inode_find(&desc);
  if (ret < 0)
    {
      /* "O_CREAT is not set and the named file does not exist.  Or, a
       * directory component in pathname does not exist or is a dangling
       * symbolic link."
       */

      goto errout_with_search;
    }

  /* Get the search results.  inode_find() returned success, so a node must
   * have been found; the reference it holds is released on every exit path.
   */

  inode = desc.node;
  DEBUGASSERT(inode != NULL);

#if defined(CONFIG_BCH) && \
    !defined(CONFIG_DISABLE_MOUNTPOINT) && \
    !defined(CONFIG_DISABLE_PSEUDOFS_OPERATIONS)
  /* If the inode is block driver, then we may return a character driver
   * proxy for the block driver.  block_proxy() will instantiate a BCH
   * character driver wrapper around the block driver, open(), then
   * unlink() the character driver.
   *
   * NOTE: This will recurse to open the character driver proxy.
   */

  if (INODE_IS_BLOCK(inode) || INODE_IS_MTD(inode))
    {
      /* Release the inode reference */

      inode_release(inode);
      RELEASE_SEARCH(&desc);

      /* Get the file structure of the opened character driver proxy */

      return block_proxy(filep, path, oflags);
    }
  else
#endif

  /* Verify that the inode is either a "normal" character driver or a
   * mountpoint.  We specifically exclude "special" inodes (semaphores,
   * message queues, shared memory).
   */

#ifndef CONFIG_DISABLE_MOUNTPOINT
  if ((!INODE_IS_DRIVER(inode) && !INODE_IS_MOUNTPT(inode)) ||
      !inode->u.i_ops)
#else
  if (!INODE_IS_DRIVER(inode) || !inode->u.i_ops)
#endif
    {
      ret = -ENXIO;
      goto errout_with_inode;
    }

  /* Make sure that the inode supports the requested access */

  ret = inode_checkflags(inode, oflags);
  if (ret < 0)
    {
      goto errout_with_inode;
    }

  /* Associate the inode with a file structure */

  filep->f_oflags = oflags;
  filep->f_pos    = 0;
  filep->f_inode  = inode;
  filep->f_priv   = NULL;

  /* Perform the driver open operation.  NOTE that the open method may be
   * called many times.  The driver/mountpoint logic should handled this
   * because it may also be closed that many times.
   */

  ret = OK;
  if (inode->u.i_ops->open)
    {
#ifndef CONFIG_DISABLE_MOUNTPOINT
      /* Mountpoints take the path relative to the mount plus the creation
       * mode; plain drivers take only the file structure.
       */

      if (INODE_IS_MOUNTPT(inode))
        {
          ret = inode->u.i_mops->open(filep, desc.relpath, oflags, mode);
        }
      else
#endif
        {
          ret = inode->u.i_ops->open(filep);
        }
    }

  if (ret < 0)
    {
      goto errout_with_inode;
    }

  RELEASE_SEARCH(&desc);
  return OK;

errout_with_inode:

  /* Drop the inode binding and the reference taken by inode_find() */

  filep->f_inode = NULL;
  inode_release(inode);

errout_with_search:
  RELEASE_SEARCH(&desc);
  return ret;
}

/****************************************************************************
 * Name: nx_vopen
 *
 * Description:
 *   Open 'path' via file_vopen() and then bind the resulting open file to
 *   a new file descriptor in the caller's file list.
 *
 * Returned Value:
 *   The new file descriptor on success; a negated errno value on failure.
 *
 ****************************************************************************/

static int nx_vopen(FAR const char *path, int oflags, va_list ap)
{
  struct file filep;
  int ret;
  int fd;

  /* Let file_vopen() do all of the work */

  ret = file_vopen(&filep, path, oflags, ap);
  if (ret < 0)
    {
      return ret;
    }

  /* Allocate a new file descriptor for the inode */

  fd = files_allocate(filep.f_inode, filep.f_oflags,
                      filep.f_pos, filep.f_priv, 0);
  if (fd < 0)
    {
      /* No descriptor available: undo the successful open so the inode
       * reference and driver state are released.
       */

      file_close(&filep);
      return fd;
    }

  return fd;
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: inode_checkflags
 *
 * Description:
 *   Check if the access described by 'oflags' is supported on 'inode'
 *
 ****************************************************************************/

int inode_checkflags(FAR struct inode *inode, int oflags)
{
  /* Read access requires a read method; write access a write method */

  if (((oflags & O_RDOK) != 0 && !inode->u.i_ops->read) ||
      ((oflags & O_WROK) != 0 && !inode->u.i_ops->write))
    {
      return -EACCES;
    }
  else
    {
      return OK;
    }
}

/****************************************************************************
 * Name: file_open
 *
 * Description:
 *   file_open() is similar to the standard 'open' interface except that it
 *   returns an instance of 'struct file' rather than a file descriptor.  It
 *   also is not a cancellation point and does not modify the errno variable.
 *
 * Input Parameters:
 *   filep  - The caller provided location in which to return the 'struct
 *            file' instance.
 *   path   - The full path to the file to be open.
 *   oflags - open flags
 *   ...    - Variable number of arguments, may include 'mode_t mode'
 *
 * Returned Value:
 *   Zero (OK) is returned on success.  On failure, a negated errno value is
 *   returned.
 *
 ****************************************************************************/

int file_open(FAR struct file *filep, FAR const char *path, int oflags, ...)
{
  va_list ap;
  int ret;

  va_start(ap, oflags);
  ret = file_vopen(filep, path, oflags, ap);
  va_end(ap);

  return ret;
}

/****************************************************************************
 * Name: nx_open
 *
 * Description:
 *   nx_open() is similar to the standard 'open' interface except that is is
 *   not a cancellation point and it does not modify the errno variable.
 *
 *   nx_open() is an internal NuttX interface and should not be called from
 *   applications.
 *
 * Returned Value:
 *   The new file descriptor is returned on success; a negated errno value is
 *   returned on any failure.
 *
 ****************************************************************************/

int nx_open(FAR const char *path, int oflags, ...)
{
  va_list ap;
  int fd;

  /* Let nx_vopen() do all of the work */

  va_start(ap, oflags);
  fd = nx_vopen(path, oflags, ap);
  va_end(ap);

  return fd;
}

/****************************************************************************
 * Name: open
 *
 * Description:
 *   Standard 'open' interface
 *
 * Returned Value:
 *   The new file descriptor is returned on success; -1 (ERROR) is returned
 *   on any failure the errno value set appropriately.
 *
 ****************************************************************************/

int open(FAR const char *path, int oflags, ...)
{
  va_list ap;
  int fd;

  /* open() is a cancellation point */

  enter_cancellation_point();

  /* Let nx_vopen() do most of the work */

  va_start(ap, oflags);
  fd = nx_vopen(path, oflags, ap);
  va_end(ap);

  /* Set the errno value if any errors were reported by nx_open() */

  if (fd < 0)
    {
      set_errno(-fd);
      fd = ERROR;
    }

  leave_cancellation_point();
  return fd;
}
738661.c
/* * Copyright (C) Roman Arutyunyan */ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_http.h> #include <nginx.h> #include "ngx_rtmp.h" #include "ngx_rtmp_version.h" #include "ngx_rtmp_live_module.h" #include "ngx_rtmp_play_module.h" #include "ngx_rtmp_codec_module.h" static ngx_int_t ngx_rtmp_stat_init_process(ngx_cycle_t *cycle); static char *ngx_rtmp_stat(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); static ngx_int_t ngx_rtmp_stat_postconfiguration(ngx_conf_t *cf); static void * ngx_rtmp_stat_create_loc_conf(ngx_conf_t *cf); static char * ngx_rtmp_stat_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child); static time_t start_time; #define NGX_RTMP_STAT_ALL 0xff #define NGX_RTMP_STAT_GLOBAL 0x01 #define NGX_RTMP_STAT_LIVE 0x02 #define NGX_RTMP_STAT_CLIENTS 0x04 #define NGX_RTMP_STAT_PLAY 0x08 /* * global: stat-{bufs-{total,free,used}, total bytes in/out, bw in/out} - cscf */ typedef struct { ngx_uint_t stat; ngx_str_t stylesheet; } ngx_rtmp_stat_loc_conf_t; static ngx_conf_bitmask_t ngx_rtmp_stat_masks[] = { { ngx_string("all"), NGX_RTMP_STAT_ALL }, { ngx_string("global"), NGX_RTMP_STAT_GLOBAL }, { ngx_string("live"), NGX_RTMP_STAT_LIVE }, { ngx_string("clients"), NGX_RTMP_STAT_CLIENTS }, { ngx_null_string, 0 } }; static ngx_command_t ngx_rtmp_stat_commands[] = { { ngx_string("rtmp_stat"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_1MORE, ngx_rtmp_stat, NGX_HTTP_LOC_CONF_OFFSET, offsetof(ngx_rtmp_stat_loc_conf_t, stat), ngx_rtmp_stat_masks }, { ngx_string("rtmp_stat_stylesheet"), NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, ngx_conf_set_str_slot, NGX_HTTP_LOC_CONF_OFFSET, offsetof(ngx_rtmp_stat_loc_conf_t, stylesheet), NULL }, ngx_null_command }; static ngx_http_module_t ngx_rtmp_stat_module_ctx = { NULL, /* preconfiguration */ ngx_rtmp_stat_postconfiguration, /* postconfiguration */ NULL, /* create main configuration */ NULL, /* init main configuration */ NULL, /* create server configuration */ 
NULL, /* merge server configuration */ ngx_rtmp_stat_create_loc_conf, /* create location configuration */ ngx_rtmp_stat_merge_loc_conf, /* merge location configuration */ }; ngx_module_t ngx_rtmp_stat_module = { NGX_MODULE_V1, &ngx_rtmp_stat_module_ctx, /* module context */ ngx_rtmp_stat_commands, /* module directives */ NGX_HTTP_MODULE, /* module type */ NULL, /* init master */ NULL, /* init module */ ngx_rtmp_stat_init_process, /* init process */ NULL, /* init thread */ NULL, /* exit thread */ NULL, /* exit process */ NULL, /* exit master */ NGX_MODULE_V1_PADDING }; #define NGX_RTMP_STAT_BUFSIZE 256 static ngx_int_t ngx_rtmp_stat_init_process(ngx_cycle_t *cycle) { /* * HTTP process initializer is called * after event module initializer * so we can run posted events here */ ngx_event_process_posted(cycle, &ngx_rtmp_init_queue); return NGX_OK; } /* ngx_escape_html does not escape characters out of ASCII range * which are bad for xslt */ static void * ngx_rtmp_stat_escape(ngx_http_request_t *r, void *data, size_t len) { u_char *p, *np; void *new_data; size_t n; p = data; for (n = 0; n < len; ++n, ++p) { if (*p < 0x20 || *p >= 0x7f) { break; } } if (n == len) { return data; } new_data = ngx_palloc(r->pool, len); if (new_data == NULL) { return NULL; } p = data; np = new_data; for (n = 0; n < len; ++n, ++p, ++np) { *np = (*p < 0x20 || *p >= 0x7f) ? (u_char) ' ' : *p; } return new_data; } #if (NGX_WIN32) /* * Fix broken MSVC memcpy optimization for 4-byte data * when this function is inlined */ __declspec(noinline) #endif static void ngx_rtmp_stat_output(ngx_http_request_t *r, ngx_chain_t ***lll, void *data, size_t len, ngx_uint_t escape) { ngx_chain_t *cl; ngx_buf_t *b; size_t real_len; if (len == 0) { return; } if (escape) { data = ngx_rtmp_stat_escape(r, data, len); if (data == NULL) { return; } } real_len = escape ? 
len + ngx_escape_html(NULL, data, len) : len; cl = **lll; if (cl && cl->buf->last + real_len > cl->buf->end) { *lll = &cl->next; } if (**lll == NULL) { cl = ngx_alloc_chain_link(r->pool); if (cl == NULL) { return; } b = ngx_create_temp_buf(r->pool, ngx_max(NGX_RTMP_STAT_BUFSIZE, real_len)); if (b == NULL || b->pos == NULL) { return; } cl->next = NULL; cl->buf = b; **lll = cl; } b = (**lll)->buf; if (escape) { b->last = (u_char *)ngx_escape_html(b->last, data, len); } else { b->last = ngx_cpymem(b->last, data, len); } } /* These shortcuts assume 2 variables exist in current context: * ngx_http_request_t *r * ngx_chain_t ***lll */ /* plain data */ #define NGX_RTMP_STAT(data, len) ngx_rtmp_stat_output(r, lll, data, len, 0) /* escaped data */ #define NGX_RTMP_STAT_E(data, len) ngx_rtmp_stat_output(r, lll, data, len, 1) /* literal */ #define NGX_RTMP_STAT_L(s) NGX_RTMP_STAT((s), sizeof(s) - 1) /* ngx_str_t */ #define NGX_RTMP_STAT_S(s) NGX_RTMP_STAT((s)->data, (s)->len) /* escaped ngx_str_t */ #define NGX_RTMP_STAT_ES(s) NGX_RTMP_STAT_E((s)->data, (s)->len) /* C string */ #define NGX_RTMP_STAT_CS(s) NGX_RTMP_STAT((s), ngx_strlen(s)) /* escaped C string */ #define NGX_RTMP_STAT_ECS(s) NGX_RTMP_STAT_E((s), ngx_strlen(s)) #define NGX_RTMP_STAT_BW 0x01 #define NGX_RTMP_STAT_BYTES 0x02 #define NGX_RTMP_STAT_BW_BYTES 0x03 #define NGX_RTMP_STAT_FPS 0x04 static void ngx_rtmp_stat_bw(ngx_http_request_t *r, ngx_chain_t ***lll, ngx_rtmp_bandwidth_t *bw, char *name, ngx_uint_t flags) { u_char buf[NGX_INT64_LEN + 9]; ngx_rtmp_update_stat(bw); if (flags & NGX_RTMP_STAT_BW) { NGX_RTMP_STAT_L("<bw_"); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), ">%uL</bw_", bw->bandwidth * 8) - buf); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT_L(">\r\n"); } if (flags & NGX_RTMP_STAT_BYTES) { NGX_RTMP_STAT_L("<bytes_"); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), ">%uL</bytes_", bw->bytes) - buf); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT_L(">\r\n"); } 
if (flags & NGX_RTMP_STAT_FPS) { NGX_RTMP_STAT_L("<fps_"); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), ">%uL</fps_", bw->fps) - buf); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT_L(">\r\n"); NGX_RTMP_STAT_L("<frames_"); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), ">%uL</frames_", bw->frames) - buf); NGX_RTMP_STAT_CS(name); NGX_RTMP_STAT_L(">\r\n"); } } #ifdef NGX_RTMP_POOL_DEBUG static void ngx_rtmp_stat_get_pool_size(ngx_pool_t *pool, ngx_uint_t *nlarge, ngx_uint_t *size) { ngx_pool_large_t *l; ngx_pool_t *p, *n; *nlarge = 0; for (l = pool->large; l; l = l->next) { ++*nlarge; } *size = 0; for (p = pool, n = pool->d.next; /* void */; p = n, n = n->d.next) { *size += (p->d.last - (u_char *)p); if (n == NULL) { break; } } } static void ngx_rtmp_stat_dump_pool(ngx_http_request_t *r, ngx_chain_t ***lll, ngx_pool_t *pool) { ngx_uint_t nlarge, size; u_char buf[NGX_INT_T_LEN]; size = 0; nlarge = 0; ngx_rtmp_stat_get_pool_size(pool, &nlarge, &size); NGX_RTMP_STAT_L("<pool><nlarge>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", nlarge) - buf); NGX_RTMP_STAT_L("</nlarge><size>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", size) - buf); NGX_RTMP_STAT_L("</size></pool>\r\n"); } #endif static void ngx_rtmp_stat_client(ngx_http_request_t *r, ngx_chain_t ***lll, ngx_rtmp_session_t *s) { u_char buf[NGX_INT_T_LEN]; #ifdef NGX_RTMP_POOL_DEBUG ngx_rtmp_stat_dump_pool(r, lll, s->connection->pool); #endif NGX_RTMP_STAT_L("<id>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", (ngx_uint_t) s->connection->number) - buf); NGX_RTMP_STAT_L("</id>"); NGX_RTMP_STAT_L("<address>"); NGX_RTMP_STAT_ES(&s->connection->addr_text); NGX_RTMP_STAT_L("</address>"); NGX_RTMP_STAT_L("<time>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%i", (ngx_int_t) (ngx_current_msec - s->epoch)) - buf); NGX_RTMP_STAT_L("</time>"); if (s->flashver.len) { NGX_RTMP_STAT_L("<flashver>"); NGX_RTMP_STAT_ES(&s->flashver); 
NGX_RTMP_STAT_L("</flashver>"); } if (s->page_url.len) { NGX_RTMP_STAT_L("<pageurl>"); NGX_RTMP_STAT_ES(&s->page_url); NGX_RTMP_STAT_L("</pageurl>"); } if (s->swf_url.len) { NGX_RTMP_STAT_L("<swfurl>"); NGX_RTMP_STAT_ES(&s->swf_url); NGX_RTMP_STAT_L("</swfurl>"); } } static char * ngx_rtmp_stat_get_aac_profile(ngx_uint_t p, ngx_uint_t sbr, ngx_uint_t ps) { switch (p) { case 1: return "Main"; case 2: if (ps) { return "HEv2"; } if (sbr) { return "HE"; } return "LC"; case 3: return "SSR"; case 4: return "LTP"; case 5: return "SBR"; default: return ""; } } static char * ngx_rtmp_stat_get_avc_profile(ngx_uint_t p) { switch (p) { case 66: return "Baseline"; case 77: return "Main"; case 100: return "High"; default: return ""; } } static void ngx_rtmp_stat_live(ngx_http_request_t *r, ngx_chain_t ***lll, ngx_rtmp_live_app_conf_t *lacf) { ngx_rtmp_live_stream_t *stream; ngx_rtmp_codec_ctx_t *codec; ngx_rtmp_live_ctx_t *ctx; ngx_rtmp_session_t *s; ngx_int_t n; ngx_uint_t nclients, total_nclients; u_char buf[NGX_INT_T_LEN]; u_char bbuf[NGX_INT32_LEN]; ngx_rtmp_stat_loc_conf_t *slcf; u_char *cname; if (!lacf->live) { return; } slcf = ngx_http_get_module_loc_conf(r, ngx_rtmp_stat_module); NGX_RTMP_STAT_L("<live>\r\n"); total_nclients = 0; for (n = 0; n < lacf->nbuckets; ++n) { for (stream = lacf->streams[n]; stream; stream = stream->next) { NGX_RTMP_STAT_L("<stream>\r\n"); NGX_RTMP_STAT_L("<name>"); NGX_RTMP_STAT_ECS(stream->name); NGX_RTMP_STAT_L("</name>\r\n"); NGX_RTMP_STAT_L("<time>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%i", (ngx_int_t) (ngx_current_msec - stream->epoch)) - buf); NGX_RTMP_STAT_L("</time>"); ngx_rtmp_stat_bw(r, lll, &stream->bw_in, "in", NGX_RTMP_STAT_BW_BYTES); ngx_rtmp_stat_bw(r, lll, &stream->bw_out, "out", NGX_RTMP_STAT_BW_BYTES); ngx_rtmp_stat_bw(r, lll, &stream->bw_in_audio, "audio", NGX_RTMP_STAT_BW); ngx_rtmp_stat_bw(r, lll, &stream->bw_in_audio, "audio", NGX_RTMP_STAT_FPS); ngx_rtmp_stat_bw(r, lll, &stream->bw_in_video, "video", 
NGX_RTMP_STAT_BW); ngx_rtmp_stat_bw(r, lll, &stream->bw_in_video, "video", NGX_RTMP_STAT_FPS); nclients = 0; codec = NULL; for (ctx = stream->ctx; ctx; ctx = ctx->next, ++nclients) { s = ctx->session; if (slcf->stat & NGX_RTMP_STAT_CLIENTS) { NGX_RTMP_STAT_L("<client>"); ngx_rtmp_stat_client(r, lll, s); NGX_RTMP_STAT_L("<dropped>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", ctx->ndropped) - buf); NGX_RTMP_STAT_L("</dropped>"); NGX_RTMP_STAT_L("<avsync>"); if (!lacf->interleave) { NGX_RTMP_STAT(bbuf, ngx_snprintf(bbuf, sizeof(bbuf), "%D", ctx->cs[1].timestamp - ctx->cs[0].timestamp) - bbuf); } NGX_RTMP_STAT_L("</avsync>"); NGX_RTMP_STAT_L("<timestamp>"); NGX_RTMP_STAT(bbuf, ngx_snprintf(bbuf, sizeof(bbuf), "%D", s->current_time) - bbuf); NGX_RTMP_STAT_L("</timestamp>"); if (ctx->publishing) { NGX_RTMP_STAT_L("<publishing/>"); } if (ctx->active) { NGX_RTMP_STAT_L("<active/>"); } NGX_RTMP_STAT_L("</client>\r\n"); } if (ctx->publishing) { codec = ngx_rtmp_get_module_ctx(s, ngx_rtmp_codec_module); } } total_nclients += nclients; if (codec) { NGX_RTMP_STAT_L("<meta>"); NGX_RTMP_STAT_L("<video>"); NGX_RTMP_STAT_L("<width>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", codec->width) - buf); NGX_RTMP_STAT_L("</width><height>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", codec->height) - buf); NGX_RTMP_STAT_L("</height><frame_rate>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", codec->frame_rate) - buf); NGX_RTMP_STAT_L("</frame_rate>"); cname = ngx_rtmp_get_video_codec_name(codec->video_codec_id); if (*cname) { NGX_RTMP_STAT_L("<codec>"); NGX_RTMP_STAT_ECS(cname); NGX_RTMP_STAT_L("</codec>"); } if (codec->avc_profile) { NGX_RTMP_STAT_L("<profile>"); NGX_RTMP_STAT_CS( ngx_rtmp_stat_get_avc_profile(codec->avc_profile)); NGX_RTMP_STAT_L("</profile>"); } if (codec->avc_level) { NGX_RTMP_STAT_L("<compat>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", codec->avc_compat) - buf); NGX_RTMP_STAT_L("</compat>"); } if 
(codec->avc_level) { NGX_RTMP_STAT_L("<level>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%.1f", codec->avc_level / 10.) - buf); NGX_RTMP_STAT_L("</level>"); } NGX_RTMP_STAT_L("</video>"); NGX_RTMP_STAT_L("<audio>"); cname = ngx_rtmp_get_audio_codec_name(codec->audio_codec_id); if (*cname) { NGX_RTMP_STAT_L("<codec>"); NGX_RTMP_STAT_ECS(cname); NGX_RTMP_STAT_L("</codec>"); } if (codec->aac_profile) { NGX_RTMP_STAT_L("<profile>"); NGX_RTMP_STAT_CS( ngx_rtmp_stat_get_aac_profile(codec->aac_profile, codec->aac_sbr, codec->aac_ps)); NGX_RTMP_STAT_L("</profile>"); } if (codec->aac_chan_conf) { NGX_RTMP_STAT_L("<channels>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", codec->aac_chan_conf) - buf); NGX_RTMP_STAT_L("</channels>"); } else if (codec->audio_channels) { NGX_RTMP_STAT_L("<channels>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", codec->audio_channels) - buf); NGX_RTMP_STAT_L("</channels>"); } if (codec->sample_rate) { NGX_RTMP_STAT_L("<sample_rate>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", codec->sample_rate) - buf); NGX_RTMP_STAT_L("</sample_rate>"); } NGX_RTMP_STAT_L("</audio>"); NGX_RTMP_STAT_L("</meta>\r\n"); } NGX_RTMP_STAT_L("<nclients>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", nclients) - buf); NGX_RTMP_STAT_L("</nclients>\r\n"); if (stream->publishing) { NGX_RTMP_STAT_L("<publishing/>\r\n"); } if (stream->active) { NGX_RTMP_STAT_L("<active/>\r\n"); } NGX_RTMP_STAT_L("</stream>\r\n"); } } NGX_RTMP_STAT_L("<nclients>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", total_nclients) - buf); NGX_RTMP_STAT_L("</nclients>\r\n"); NGX_RTMP_STAT_L("</live>\r\n"); } static void ngx_rtmp_stat_play(ngx_http_request_t *r, ngx_chain_t ***lll, ngx_rtmp_play_app_conf_t *pacf) { ngx_rtmp_play_ctx_t *ctx, *sctx; ngx_rtmp_session_t *s; ngx_uint_t n, nclients, total_nclients; u_char buf[NGX_INT_T_LEN]; u_char bbuf[NGX_INT32_LEN]; ngx_rtmp_stat_loc_conf_t *slcf; if (pacf->entries.nelts == 0) { 
return; } slcf = ngx_http_get_module_loc_conf(r, ngx_rtmp_stat_module); NGX_RTMP_STAT_L("<play>\r\n"); total_nclients = 0; for (n = 0; n < pacf->nbuckets; ++n) { for (ctx = pacf->ctx[n]; ctx; ) { NGX_RTMP_STAT_L("<stream>\r\n"); NGX_RTMP_STAT_L("<name>"); NGX_RTMP_STAT_ECS(ctx->name); NGX_RTMP_STAT_L("</name>\r\n"); nclients = 0; sctx = ctx; for (; ctx; ctx = ctx->next) { if (ngx_strcmp(ctx->name, sctx->name)) { break; } nclients++; s = ctx->session; if (slcf->stat & NGX_RTMP_STAT_CLIENTS) { NGX_RTMP_STAT_L("<client>"); ngx_rtmp_stat_client(r, lll, s); NGX_RTMP_STAT_L("<timestamp>"); NGX_RTMP_STAT(bbuf, ngx_snprintf(bbuf, sizeof(bbuf), "%D", s->current_time) - bbuf); NGX_RTMP_STAT_L("</timestamp>"); NGX_RTMP_STAT_L("</client>\r\n"); } } total_nclients += nclients; NGX_RTMP_STAT_L("<active/>"); NGX_RTMP_STAT_L("<nclients>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", nclients) - buf); NGX_RTMP_STAT_L("</nclients>\r\n"); NGX_RTMP_STAT_L("</stream>\r\n"); } } NGX_RTMP_STAT_L("<nclients>"); NGX_RTMP_STAT(buf, ngx_snprintf(buf, sizeof(buf), "%ui", total_nclients) - buf); NGX_RTMP_STAT_L("</nclients>\r\n"); NGX_RTMP_STAT_L("</play>\r\n"); } static void ngx_rtmp_stat_application(ngx_http_request_t *r, ngx_chain_t ***lll, ngx_rtmp_core_app_conf_t *cacf) { ngx_rtmp_stat_loc_conf_t *slcf; NGX_RTMP_STAT_L("<application>\r\n"); NGX_RTMP_STAT_L("<name>"); NGX_RTMP_STAT_ES(&cacf->name); NGX_RTMP_STAT_L("</name>\r\n"); slcf = ngx_http_get_module_loc_conf(r, ngx_rtmp_stat_module); if (slcf->stat & NGX_RTMP_STAT_LIVE) { ngx_rtmp_stat_live(r, lll, cacf->app_conf[ngx_rtmp_live_module.ctx_index]); } if (slcf->stat & NGX_RTMP_STAT_PLAY) { ngx_rtmp_stat_play(r, lll, cacf->app_conf[ngx_rtmp_play_module.ctx_index]); } NGX_RTMP_STAT_L("</application>\r\n"); } static void ngx_rtmp_stat_server(ngx_http_request_t *r, ngx_chain_t ***lll, ngx_rtmp_core_srv_conf_t *cscf) { ngx_rtmp_core_app_conf_t **cacf; size_t n; NGX_RTMP_STAT_L("<server>\r\n"); #ifdef NGX_RTMP_POOL_DEBUG 
ngx_rtmp_stat_dump_pool(r, lll, cscf->pool); #endif cacf = cscf->applications.elts; for (n = 0; n < cscf->applications.nelts; ++n, ++cacf) { ngx_rtmp_stat_application(r, lll, *cacf); } NGX_RTMP_STAT_L("</server>\r\n"); } static ngx_int_t ngx_rtmp_stat_handler(ngx_http_request_t *r) { ngx_rtmp_stat_loc_conf_t *slcf; ngx_rtmp_core_main_conf_t *cmcf; ngx_rtmp_core_srv_conf_t **cscf; ngx_chain_t *cl, *l, **ll, ***lll; size_t n; off_t len; static u_char tbuf[NGX_TIME_T_LEN]; static u_char nbuf[NGX_INT_T_LEN]; slcf = ngx_http_get_module_loc_conf(r, ngx_rtmp_stat_module); if (slcf->stat == 0) { return NGX_DECLINED; } cmcf = ngx_rtmp_core_main_conf; if (cmcf == NULL) { goto error; } cl = NULL; ll = &cl; lll = &ll; NGX_RTMP_STAT_L("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\r\n"); if (slcf->stylesheet.len) { NGX_RTMP_STAT_L("<?xml-stylesheet type=\"text/xsl\" href=\""); NGX_RTMP_STAT_ES(&slcf->stylesheet); NGX_RTMP_STAT_L("\" ?>\r\n"); } NGX_RTMP_STAT_L("<rtmp>\r\n"); #ifdef NGINX_VERSION NGX_RTMP_STAT_L("<nginx_version>" NGINX_VERSION "</nginx_version>\r\n"); #endif #ifdef NGINX_RTMP_VERSION NGX_RTMP_STAT_L("<nginx_rtmp_version>" NGINX_RTMP_VERSION "</nginx_rtmp_version>\r\n"); #endif #ifdef NGX_COMPILER NGX_RTMP_STAT_L("<compiler>" NGX_COMPILER "</compiler>\r\n"); #endif NGX_RTMP_STAT_L("<built>" __DATE__ " " __TIME__ "</built>\r\n"); NGX_RTMP_STAT_L("<pid>"); NGX_RTMP_STAT(nbuf, ngx_snprintf(nbuf, sizeof(nbuf), "%ui", (ngx_uint_t) ngx_getpid()) - nbuf); NGX_RTMP_STAT_L("</pid>\r\n"); NGX_RTMP_STAT_L("<uptime>"); NGX_RTMP_STAT(tbuf, ngx_snprintf(tbuf, sizeof(tbuf), "%T", ngx_cached_time->sec - start_time) - tbuf); NGX_RTMP_STAT_L("</uptime>\r\n"); NGX_RTMP_STAT_L("<naccepted>"); NGX_RTMP_STAT(nbuf, ngx_snprintf(nbuf, sizeof(nbuf), "%ui", ngx_rtmp_naccepted) - nbuf); NGX_RTMP_STAT_L("</naccepted>\r\n"); ngx_rtmp_stat_bw(r, lll, &ngx_rtmp_bw_in, "in", NGX_RTMP_STAT_BW_BYTES); ngx_rtmp_stat_bw(r, lll, &ngx_rtmp_bw_out, "out", NGX_RTMP_STAT_BW_BYTES); cscf = 
cmcf->servers.elts; for (n = 0; n < cmcf->servers.nelts; ++n, ++cscf) { ngx_rtmp_stat_server(r, lll, *cscf); } NGX_RTMP_STAT_L("</rtmp>\r\n"); len = 0; for (l = cl; l; l = l->next) { len += (l->buf->last - l->buf->pos); } ngx_str_set(&r->headers_out.content_type, "text/xml"); r->headers_out.content_length_n = len; r->headers_out.status = NGX_HTTP_OK; ngx_http_send_header(r); (*ll)->buf->last_buf = 1; return ngx_http_output_filter(r, cl); error: r->headers_out.status = NGX_HTTP_INTERNAL_SERVER_ERROR; r->headers_out.content_length_n = 0; return ngx_http_send_header(r); } static void * ngx_rtmp_stat_create_loc_conf(ngx_conf_t *cf) { ngx_rtmp_stat_loc_conf_t *conf; conf = ngx_pcalloc(cf->pool, sizeof(ngx_rtmp_stat_loc_conf_t)); if (conf == NULL) { return NULL; } conf->stat = 0; return conf; } static char * ngx_rtmp_stat_merge_loc_conf(ngx_conf_t *cf, void *parent, void *child) { ngx_rtmp_stat_loc_conf_t *prev = parent; ngx_rtmp_stat_loc_conf_t *conf = child; ngx_conf_merge_bitmask_value(conf->stat, prev->stat, 0); ngx_conf_merge_str_value(conf->stylesheet, prev->stylesheet, ""); return NGX_CONF_OK; } static char * ngx_rtmp_stat(ngx_conf_t *cf, ngx_command_t *cmd, void *conf) { ngx_http_core_loc_conf_t *clcf; clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module); clcf->handler = ngx_rtmp_stat_handler; return ngx_conf_set_bitmask_slot(cf, cmd, conf); } static ngx_int_t ngx_rtmp_stat_postconfiguration(ngx_conf_t *cf) { start_time = ngx_cached_time->sec; return NGX_OK; }
119659.c
// Room: /city/milin.c inherit ROOM; void create() { set("short", "青竹林"); set("long", @LONG 這是一片茂密的青竹林,一走進來,你彷彿迷失了方向。 LONG ); set("exits", ([ "east" : __FILE__, "west" : "/d/city/ml4", "south" : "/d/city/dongmen", "north" : __FILE__, ])); set("outdoors", "city"); set("coor/x", 50); set("coor/y", 20); set("coor/z", 0); setup(); replace_program(ROOM); }
616152.c
/*****************************************************************************
 * ugBASIC - an isomorphic BASIC language compiler for retrocomputers
 *****************************************************************************
 * Copyright 2021 Marco Spedaletti (asimov@mclink.it)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *----------------------------------------------------------------------------
 * Concesso in licenza secondo i termini della Licenza Apache, versione 2.0
 * (la "Licenza"); è proibito usare questo file se non in conformità alla
 * Licenza. Una copia della Licenza è disponibile all'indirizzo:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Se non richiesto dalla legislazione vigente o concordato per iscritto,
 * il software distribuito nei termini della Licenza è distribuito
 * "COSÌ COM'È", SENZA GARANZIE O CONDIZIONI DI ALCUN TIPO, esplicite o
 * implicite. Consultare la Licenza per il testo specifico che regola le
 * autorizzazioni e le limitazioni previste dalla medesima.
 ****************************************************************************/

/****************************************************************************
 * INCLUDE SECTION
 ****************************************************************************/

#include "../../ugbc.h"

/****************************************************************************
 * CODE SECTION
 ****************************************************************************/

/**
 * @brief Emit ASM code for <b>SCREEN HORIZONTAL SCROLL [int]x</b>
 *
 * This function outputs an assembly code capable of performing a
 * hardware scroll of the screen. The scroll is always in the direction
 * from right to left, so with a _displacement of 0 the screen is exactly as
 * it would be without scrolling while with the value 7 you would have a scroll
 * of 7 pixels to left. This version is used when a direct integer is used.
 *
 * @param _environment Current calling environment
 * @param _displacement Horizontal offset in pixels (0-7)
 */
void screen_horizontal_scroll( Environment * _environment, int _displacement ) {

    /* NOTE(review): intentionally empty — hardware horizontal scrolling is
     * not implemented for this target; the statement compiles to nothing. */

}

/**
 * @brief Emit ASM code for <b>SCREEN HORIZONTAL SCROLL [expression[</b>
 *
 * This function outputs an assembly code capable of performing a
 * hardware scroll of the screen. The scroll is always in the direction
 * from right to left, so with a _displacement of 0 the screen is exactly as
 * it would be without scrolling while with the value 7 you would have a scroll
 * of 7 pixels to left. This version is used when an expression is used.
 *
 * @param _environment Current calling environment
 * @param _displacement Horizontal offset in pixels (0-7)
 */
void screen_horizontal_scroll_var( Environment * _environment, char * _displacement ) {

    /* NOTE(review): intentionally empty — see screen_horizontal_scroll(). */

}
490288.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52b.c Label Definition File: CWE187_Partial_Comparison.label.xml Template File: sources-sinks-52b.tmpl.c */ /* * @description * CWE: 187 Partial Comparison * BadSource: fromFile Read input from a file * GoodSource: Provide a matching password * Sinks: ncmp_user_pw * GoodSink: Compare the 2 passwords correctly * BadSink : use wcsncmp() to do password match, but use the length of the user password * Flow Variant: 52 Data flow: data passed as an argument from one function to another to another in three different source files * * */ #include "std_testcase.h" #include <wchar.h> #define PASSWORD L"Password1234" /* PASSWORD_SZ must equal the length of PASSWORD */ #define PASSWORD_SZ wcslen(PASSWORD) #ifdef _WIN32 # define FOPEN _wfopen #else /* fopen is used on unix-based OSs */ # define FOPEN fopen #endif #ifndef OMITBAD /* bad function declaration */ void CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52c_bad_sink(wchar_t * data); void CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52b_bad_sink(wchar_t * data) { CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52c_bad_sink(data); } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ void CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52c_goodG2B_sink(wchar_t * data); void CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52b_goodG2B_sink(wchar_t * data) { CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52c_goodG2B_sink(data); } /* goodB2G uses the BadSource with the GoodSink */ void CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52c_goodB2G_sink(wchar_t * data); void CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52b_goodB2G_sink(wchar_t * data) { CWE187_Partial_Comparison__wchar_t_fromFile_ncmp_user_pw_52c_goodB2G_sink(data); } #endif /* OMITGOOD */
631614.c
/* * POK header * * The following file is a part of the POK project. Any modification should * be made according to the POK licence. You CANNOT use this file or a part * of a file for your own project. * * For more information on the POK licence, please see our LICENCE FILE * * Please follow the coding guidelines described in doc/CODING_GUIDELINES * * Copyright (c) 2007-2022 POK team */ #include <core/dependencies.h> #include <core/syscall.h> #include <types.h> pok_ret_t pok_thread_period() { return pok_syscall2(POK_SYSCALL_THREAD_PERIOD, (uint32_t)NULL, (uint32_t)NULL); }
471446.c
/*
 * File: lexer.c
 *
 * Token scanner for the compiler front end.  Reads characters from the
 * global stream 'finput' (declared in c.h) and maintains the globals
 * lineno, eofno, token_buffer/real_token_buffer, lexer_code and the
 * *_found side channels.
 *
 * (Removed: large commented-out drafts of check_newline() and
 * check_subseq(); recover them from version control if ever needed.)
 */

#include "c.h"


/*
 * skip_white_space:
 *     Consume spacing characters, newlines (counting them in 'lineno')
 *     and both comment styles from 'finput'; return the first character
 *     that is none of those.  Exits the process on an unterminated
 *     multi-line comment.
 */
int skip_white_space (){

    register int c;
    register int inside;

begin:

    c = getc(finput);

    for (;;)
    {
        switch (c)
        {
            /* plain spacing: just fetch the next character */
            case ' ':
            case '\t':
            case '\f':
            case '\r':
            case '\b':
                c = getc(finput);
                break;

            /* newline: count the line and keep scanning */
            case '\n':
                lineno++;
                c = getc (finput);
                break;

            /* '/' may open a comment or be a division operator */
            case '/':
                c = getc(finput);

                /* "//" single-line comment: discard to end of line;
                 * the '\n' is handled by the case above on re-entry */
                if ( c == '/' )
                {
                    while (1)
                    {
                        c = getc(finput);
                        if ( c == '\n' )
                        {
                            break;
                        }
                    };
                    break;
                };

                /* multi-line comment */
                if (c == '*')
                {
                    c = getc(finput);
                    inside = 1;
                    while (inside)
                    {
                        if (c == '*')
                        {
                            /* skip a run of '*'s */
                            while (c == '*')
                                c = getc(finput);

                            if (c == '/')
                            {
                                /* comment closed; more blanks or
                                 * comments may follow, so restart */
                                inside = 0;
                                goto begin;
                            }

                        }else if (c == '\n'){

                            /* newlines inside comments still count */
                            lineno++;
                            c = getc(finput);

                        }else if (c == EOF || c == '\0'){

                            eofno++;
                            printf("skip_white_space: unterminated comment in line %d",lineno);
                            exit(1);

                        }else{
                            /* ordinary comment text */
                            c = getc(finput);
                        };
                    };
                };

                /* not a comment: push the character back so the
                 * expression code can see the division operand.
                 * NOTE(review): the '/' itself is not re-emitted here;
                 * long-standing behavior kept as-is. */
                ungetc ( c, finput );
                break;

            default:
                return (int) (c);

        };//switch
    }; // for
};


/*
 * yylex:
 *     Return the class of the next token from 'finput'.  The token's
 *     text is left in token_buffer; reserved words additionally set
 *     modifier_found / type_found / keyword_found, constants set
 *     constant_base_found and operators set lexer_code.
 *     (Structure derived from gcc 0.9.)
 */
int yylex (){

    register int c;
    register char *p;
    register int value;
    register int c1;

again:

    c = skip_white_space ();

    switch (c)
    {
        case 0:
        case EOF:
            eofno++;
            value = TOKENEOF;
            printf ("yylex: EOF\n");
            break;

        /* identifiers and reserved words */
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '_':

            p = token_buffer;
            while (1)
            {
                /* TODO: guard against overflowing token_buffer
                 * (maxtoken bytes) */
                *p = c;
                p++;

                c = getc (finput);

                /* any char that cannot continue an identifier ends it;
                 * push that char back for the next token */
                if ( ( isalnum(c) == 0 ) && (c != '_') )
                {
                    *p = 0;
                    ungetc ( c, finput );
                    goto id_ok;
                }
            };

id_ok:
            value = TOKENIDENTIFIER;

            /* Reserved words: modifiers, types and keywords.
             * NOTE(review): strncmp() with only the keyword's length
             * means longer identifiers such as "integer" still match
             * "int"; behavior kept as-is. */
            if ( strncmp( real_token_buffer, "signed",   6 ) == 0 ) { value = TOKENMODIFIER; modifier_found = MSIGNED; }
            if ( strncmp( real_token_buffer, "unsigned", 8 ) == 0 ) { value = TOKENMODIFIER; modifier_found = MUNSIGNED; }

            if ( strncmp( real_token_buffer, "int",   3 ) == 0 ) { value = TOKENTYPE; type_found = TINT; }
            if ( strncmp( real_token_buffer, "void",  4 ) == 0 ) { value = TOKENTYPE; type_found = TVOID; }
            if ( strncmp( real_token_buffer, "char",  4 ) == 0 ) { value = TOKENTYPE; type_found = TCHAR; }
            if ( strncmp( real_token_buffer, "short", 5 ) == 0 ) { value = TOKENTYPE; type_found = TSHORT; }
            if ( strncmp( real_token_buffer, "long",  4 ) == 0 ) { value = TOKENTYPE; type_found = TLONG; }

            if ( strncmp( real_token_buffer, "asm",      3 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWASM; }
            if ( strncmp( real_token_buffer, "goto",     4 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWGOTO; }
            if ( strncmp( real_token_buffer, "return",   6 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWRETURN; }
            if ( strncmp( real_token_buffer, "continue", 8 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWCONTINUE; }
            if ( strncmp( real_token_buffer, "default",  7 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWDEFAULT; }
            if ( strncmp( real_token_buffer, "case",     4 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWCASE; }
            if ( strncmp( real_token_buffer, "switch",   6 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWSWITCH; }
            if ( strncmp( real_token_buffer, "for",      3 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWFOR; }
            if ( strncmp( real_token_buffer, "do",       2 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWDO; }
            if ( strncmp( real_token_buffer, "while",    5 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWWHILE; }
            if ( strncmp( real_token_buffer, "else",     4 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWELSE; }
            if ( strncmp( real_token_buffer, "if",       2 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWIF; }
            if ( strncmp( real_token_buffer, "union",    5 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWUNION; }
            if ( strncmp( real_token_buffer, "struct",   6 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWSTRUCT; }
            if ( strncmp( real_token_buffer, "enum",     4 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWENUM; }
            if ( strncmp( real_token_buffer, "sizeof",   6 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWSIZEOF; }
            if ( strncmp( real_token_buffer, "volatile", 8 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWVOLATILE; }
            if ( strncmp( real_token_buffer, "inline",   6 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWINLINE; }
            if ( strncmp( real_token_buffer, "def",      3 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWDEF; }
            if ( strncmp( real_token_buffer, "static",   6 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWSTATIC; }
            if ( strncmp( real_token_buffer, "var",      3 ) == 0 ) { value = TOKENKEYWORD; keyword_found = KWVAR; }

            break;

        /* numeric constants: decimal, or hexadecimal after "0x"/"0X" */
        case '0':
        case '1':
        case '2':
        case '3':
        case '4':
        case '5':
        case '6':
        case '7':
        case '8':
        case '9':

            p = token_buffer;
            *p = c;
            p++;

            if ( c == '0' )
            {
                c = getc(finput);

                if ( c == 'x' || c == 'X' )
                {
                    /* hexadecimal: keep the 'x' and collect hex digits */
                    *p = c;
                    p++;

                    while (1)
                    {
                        c = getc (finput);

                        if ( isxdigit (c) == 0 )
                        {
                            *p = 0;
                            ungetc ( c, finput );
                            value = TOKENCONSTANT;
                            constant_base_found = CONSTANT_BASE_HEX;
                            goto constant_done;
                        }

                        *p = c;
                        p++;
                    };
                }

                /* FIX: a '0' not followed by x/X used to print
                 * "expected x" and exit(1), so the constant 0 aborted
                 * the whole compiler.  Push the char back and lex the
                 * token as a decimal constant instead. */
                ungetc ( c, finput );
            }

            /* decimal: collect digits until a non-digit shows up */
            while (1)
            {
                c = getc (finput);

                if ( isdigit( c ) == 0 )
                {
                    *p = 0;
                    ungetc ( c, finput );
                    value = TOKENCONSTANT;
                    constant_base_found = CONSTANT_BASE_DEC;
                    goto constant_done;
                }

                *p = c;
                p++;
            };

constant_done:
            break;

        /* string literal */
        case '\"':
        {
            c = getc(finput);
            p = token_buffer;

            /* copy up to the closing quote.
             * TODO: escape sequences and embedded newlines are not
             * handled yet (see gcc's readescape for a model) */
            while (c != '\"')
            {
                *p++ = c;
                c = getc (finput);
            };//while

            /* terminate; the string text stays in token_buffer */
            *p++ = 0;

            value = TOKENSTRING;
            break;
        };

        /* separators (){}[],.;:? */
        case '(':
        case ')':
        case '{':
        case '}':
        case '[':
        case ']':
        case ',':
        case '.':
        case ';':
        case ':':
        case '?':
            p = token_buffer;
            *p++ = c;
            *p++ = 0;
            value = TOKENSEPARATOR;
            break;

        /* operators used in expressions */
        case '+':
        case '-':
        case '&':
        case '|':
        case '<':
        case '>':
        case '*':
        case '/':
        case '%':
        case '^':
        case '!':
        case '=':
        {
combine:
            /* record the operator's expression code; LSHIFT/RSHIFT are
             * synthesized below and re-enter through 'combine' */
            switch (c)
            {
                case '+': lexer_code = PLUS_EXPR; break;
                case '-': lexer_code = MINUS_EXPR; break;
                case '&': lexer_code = BIT_AND_EXPR; break;
                case '|': lexer_code = BIT_IOR_EXPR; break;
                case '*': lexer_code = MULT_EXPR; break;
                case '/': lexer_code = TRUNC_DIV_EXPR; break;
                case '%': lexer_code = TRUNC_MOD_EXPR; break;
                case '^': lexer_code = BIT_XOR_EXPR; break;
                case LSHIFT: lexer_code = LSHIFT_EXPR; break;
                case RSHIFT: lexer_code = RSHIFT_EXPR; break;
                case '<': lexer_code = LT_EXPR; break;
                case '>': lexer_code = GT_EXPR; break;
            }

            c1 = getc(finput);

            if (c1 == '=')
            {
                /* two-character operators ending in '=' */
                switch (c)
                {
                    case '<': value = ARITHCOMPARE; lexer_code = LE_EXPR; goto done;
                    case '>': value = ARITHCOMPARE; lexer_code = GE_EXPR; goto done;
                    case '!': value = EQCOMPARE;    lexer_code = NE_EXPR; goto done;
                    case '=': value = EQCOMPARE;    lexer_code = EQ_EXPR; goto done;
                }
                /* anything else followed by '=' is op= assignment */
                value = ASSIGN;
                goto done;

            }else if (c == c1){

                /* doubled operators */
                switch (c)
                {
                    case '+': value = PLUSPLUS; goto done;
                    case '-': value = MINUSMINUS; goto done;
                    case '&': value = ANDAND; goto done;
                    case '|': value = OROR; goto done;
                    case '<': c = LSHIFT; goto combine;
                    case '>': c = RSHIFT; goto combine;
                }

            }else if ((c == '-') && (c1 == '>')) {
                value = POINTSAT;
                goto done;
            }

            /* single-character operator: give the lookahead back */
            ungetc (c1, finput);

            if ((c == '<') || (c == '>'))
                value = ARITHCOMPARE;
            else
                value = c;
            goto done;
        };

        default:
            value = c;

    }; //switch

done:
    return (value);
};


/*
 * lexerInit:
 *     Reset lexer state: line/EOF counters and the token buffer.
 *     Returns 0.
 */
int lexerInit (){

#ifdef GRAMCC_VERBOSE
    printf("lexerInit: Initializing ...\n");
#endif

    /* line support: text files start at line 1 */
    lineno = 1;
    lexer_firstline = 1;

    eofno = 0;

    lexer_code = 0;

    int i;
    maxtoken = MAXTOKEN;
    for ( i=0; i<MAXTOKEN; i++ )
    {
        real_token_buffer[i] = (char) '\0';
    }
    token_buffer = &real_token_buffer[0];
    /* sentinel content until the first token is scanned */
    sprintf ( real_token_buffer, "uninitialized-token-string" );

    return (int) 0;
};


/* Print an error message prefixed with "error:". */
void error ( char *msg ){

    printf("error: %s\n", msg);
};
456300.c
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * dict.c: Dictionary support */ #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) // List head for garbage collection. Although there can be a reference loop // from partial to dict to partial, we don't need to keep track of the partial, // since it will get freed when the dict is unused and gets freed. static dict_T *first_dict = NULL; /* * Allocate an empty header for a dictionary. */ dict_T * dict_alloc(void) { dict_T *d; d = ALLOC_CLEAR_ONE(dict_T); if (d != NULL) { // Add the dict to the list of dicts for garbage collection. if (first_dict != NULL) first_dict->dv_used_prev = d; d->dv_used_next = first_dict; d->dv_used_prev = NULL; first_dict = d; hash_init(&d->dv_hashtab); d->dv_lock = 0; d->dv_scope = 0; d->dv_refcount = 0; d->dv_copyID = 0; } return d; } /* * dict_alloc() with an ID for alloc_fail(). */ dict_T * dict_alloc_id(alloc_id_T id UNUSED) { #ifdef FEAT_EVAL if (alloc_fail_id == id && alloc_does_fail(sizeof(list_T))) return NULL; #endif return (dict_alloc()); } dict_T * dict_alloc_lock(int lock) { dict_T *d = dict_alloc(); if (d != NULL) d->dv_lock = lock; return d; } /* * Allocate an empty dict for a return value. * Returns OK or FAIL. */ int rettv_dict_alloc(typval_T *rettv) { dict_T *d = dict_alloc_lock(0); if (d == NULL) return FAIL; rettv_dict_set(rettv, d); return OK; } /* * Set a dictionary as the return value */ void rettv_dict_set(typval_T *rettv, dict_T *d) { rettv->v_type = VAR_DICT; rettv->vval.v_dict = d; if (d != NULL) ++d->dv_refcount; } /* * Free a Dictionary, including all non-container items it contains. * Ignores the reference count. 
*/ void dict_free_contents(dict_T *d) { hashtab_free_contents(&d->dv_hashtab); } /* * Clear hashtab "ht" and dict items it contains. */ void hashtab_free_contents(hashtab_T *ht) { int todo; hashitem_T *hi; dictitem_T *di; // Lock the hashtab, we don't want it to resize while freeing items. hash_lock(ht); todo = (int)ht->ht_used; for (hi = ht->ht_array; todo > 0; ++hi) { if (!HASHITEM_EMPTY(hi)) { // Remove the item before deleting it, just in case there is // something recursive causing trouble. di = HI2DI(hi); hash_remove(ht, hi); dictitem_free(di); --todo; } } // The hashtab is still locked, it has to be re-initialized anyway. hash_clear(ht); } static void dict_free_dict(dict_T *d) { // Remove the dict from the list of dicts for garbage collection. if (d->dv_used_prev == NULL) first_dict = d->dv_used_next; else d->dv_used_prev->dv_used_next = d->dv_used_next; if (d->dv_used_next != NULL) d->dv_used_next->dv_used_prev = d->dv_used_prev; vim_free(d); } static void dict_free(dict_T *d) { if (!in_free_unref_items) { dict_free_contents(d); dict_free_dict(d); } } /* * Unreference a Dictionary: decrement the reference count and free it when it * becomes zero. */ void dict_unref(dict_T *d) { if (d != NULL && --d->dv_refcount <= 0) dict_free(d); } /* * Go through the list of dicts and free items without the copyID. * Returns TRUE if something was freed. */ int dict_free_nonref(int copyID) { dict_T *dd; int did_free = FALSE; for (dd = first_dict; dd != NULL; dd = dd->dv_used_next) if ((dd->dv_copyID & COPYID_MASK) != (copyID & COPYID_MASK)) { // Free the Dictionary and ordinary items it contains, but don't // recurse into Lists and Dictionaries, they will be in the list // of dicts or list of lists. 
dict_free_contents(dd); did_free = TRUE; } return did_free; } void dict_free_items(int copyID) { dict_T *dd, *dd_next; for (dd = first_dict; dd != NULL; dd = dd_next) { dd_next = dd->dv_used_next; if ((dd->dv_copyID & COPYID_MASK) != (copyID & COPYID_MASK)) dict_free_dict(dd); } } /* * Allocate a Dictionary item. * The "key" is copied to the new item. * Note that the type and value of the item "di_tv" still needs to be * initialized! * Returns NULL when out of memory. */ dictitem_T * dictitem_alloc(char_u *key) { dictitem_T *di; di = alloc(offsetof(dictitem_T, di_key) + STRLEN(key) + 1); if (di != NULL) { STRCPY(di->di_key, key); di->di_flags = DI_FLAGS_ALLOC; di->di_tv.v_lock = 0; } return di; } /* * Make a copy of a Dictionary item. */ static dictitem_T * dictitem_copy(dictitem_T *org) { dictitem_T *di; di = alloc(offsetof(dictitem_T, di_key) + STRLEN(org->di_key) + 1); if (di != NULL) { STRCPY(di->di_key, org->di_key); di->di_flags = DI_FLAGS_ALLOC; copy_tv(&org->di_tv, &di->di_tv); } return di; } /* * Remove item "item" from Dictionary "dict" and free it. */ void dictitem_remove(dict_T *dict, dictitem_T *item) { hashitem_T *hi; hi = hash_find(&dict->dv_hashtab, item->di_key); if (HASHITEM_EMPTY(hi)) internal_error("dictitem_remove()"); else hash_remove(&dict->dv_hashtab, hi); dictitem_free(item); } /* * Free a dict item. Also clears the value. */ void dictitem_free(dictitem_T *item) { clear_tv(&item->di_tv); if (item->di_flags & DI_FLAGS_ALLOC) vim_free(item); } /* * Make a copy of dict "d". Shallow if "deep" is FALSE. * The refcount of the new dict is set to 1. * See item_copy() for "copyID". * Returns NULL when out of memory. 
*/ dict_T * dict_copy(dict_T *orig, int deep, int copyID) { dict_T *copy; dictitem_T *di; int todo; hashitem_T *hi; if (orig == NULL) return NULL; copy = dict_alloc(); if (copy != NULL) { if (copyID != 0) { orig->dv_copyID = copyID; orig->dv_copydict = copy; } todo = (int)orig->dv_hashtab.ht_used; for (hi = orig->dv_hashtab.ht_array; todo > 0 && !got_int; ++hi) { if (!HASHITEM_EMPTY(hi)) { --todo; di = dictitem_alloc(hi->hi_key); if (di == NULL) break; if (deep) { if (item_copy(&HI2DI(hi)->di_tv, &di->di_tv, deep, copyID) == FAIL) { vim_free(di); break; } } else copy_tv(&HI2DI(hi)->di_tv, &di->di_tv); if (dict_add(copy, di) == FAIL) { dictitem_free(di); break; } } } ++copy->dv_refcount; if (todo > 0) { dict_unref(copy); copy = NULL; } } return copy; } /* * Add item "item" to Dictionary "d". * Returns FAIL when out of memory and when key already exists. */ int dict_add(dict_T *d, dictitem_T *item) { return hash_add(&d->dv_hashtab, item->di_key); } /* * Add a number or special entry to dictionary "d". * Returns FAIL when out of memory and when key already exists. */ static int dict_add_number_special(dict_T *d, char *key, varnumber_T nr, vartype_T vartype) { dictitem_T *item; item = dictitem_alloc((char_u *)key); if (item == NULL) return FAIL; item->di_tv.v_type = vartype; item->di_tv.vval.v_number = nr; if (dict_add(d, item) == FAIL) { dictitem_free(item); return FAIL; } return OK; } /* * Add a number entry to dictionary "d". * Returns FAIL when out of memory and when key already exists. */ int dict_add_number(dict_T *d, char *key, varnumber_T nr) { return dict_add_number_special(d, key, nr, VAR_NUMBER); } /* * Add a special entry to dictionary "d". * Returns FAIL when out of memory and when key already exists. */ int dict_add_bool(dict_T *d, char *key, varnumber_T nr) { return dict_add_number_special(d, key, nr, VAR_BOOL); } /* * Add a string entry to dictionary "d". * Returns FAIL when out of memory and when key already exists. 
*/ int dict_add_string(dict_T *d, char *key, char_u *str) { return dict_add_string_len(d, key, str, -1); } /* * Add a string entry to dictionary "d". * "str" will be copied to allocated memory. * When "len" is -1 use the whole string, otherwise only this many bytes. * Returns FAIL when out of memory and when key already exists. */ int dict_add_string_len(dict_T *d, char *key, char_u *str, int len) { dictitem_T *item; char_u *val = NULL; item = dictitem_alloc((char_u *)key); if (item == NULL) return FAIL; item->di_tv.v_type = VAR_STRING; if (str != NULL) { if (len == -1) val = vim_strsave(str); else val = vim_strnsave(str, len); } item->di_tv.vval.v_string = val; if (dict_add(d, item) == FAIL) { dictitem_free(item); return FAIL; } return OK; } /* * Add a list entry to dictionary "d". * Returns FAIL when out of memory and when key already exists. */ int dict_add_list(dict_T *d, char *key, list_T *list) { dictitem_T *item; item = dictitem_alloc((char_u *)key); if (item == NULL) return FAIL; item->di_tv.v_type = VAR_LIST; item->di_tv.vval.v_list = list; ++list->lv_refcount; if (dict_add(d, item) == FAIL) { dictitem_free(item); return FAIL; } return OK; } /* * Add a typval_T entry to dictionary "d". * Returns FAIL when out of memory and when key already exists. */ int dict_add_tv(dict_T *d, char *key, typval_T *tv) { dictitem_T *item; item = dictitem_alloc((char_u *)key); if (item == NULL) return FAIL; copy_tv(tv, &item->di_tv); if (dict_add(d, item) == FAIL) { dictitem_free(item); return FAIL; } return OK; } /* * Add a callback to dictionary "d". * Returns FAIL when out of memory and when key already exists. */ int dict_add_callback(dict_T *d, char *key, callback_T *cb) { dictitem_T *item; item = dictitem_alloc((char_u *)key); if (item == NULL) return FAIL; put_callback(cb, &item->di_tv); if (dict_add(d, item) == FAIL) { dictitem_free(item); return FAIL; } return OK; } /* * Initializes "iter" for iterating over dictionary items with * dict_iterate_next(). 
* If "var" is not a Dict or an empty Dict then there will be nothing to * iterate over, no error is given. * NOTE: The dictionary must not change until iterating is finished! */ void dict_iterate_start(typval_T *var, dict_iterator_T *iter) { if (var->v_type != VAR_DICT || var->vval.v_dict == NULL) iter->dit_todo = 0; else { dict_T *d = var->vval.v_dict; iter->dit_todo = d->dv_hashtab.ht_used; iter->dit_hi = d->dv_hashtab.ht_array; } } /* * Iterate over the items referred to by "iter". It should be initialized with * dict_iterate_start(). * Returns a pointer to the key. * "*tv_result" is set to point to the value for that key. * If there are no more items, NULL is returned. */ char_u * dict_iterate_next(dict_iterator_T *iter, typval_T **tv_result) { dictitem_T *di; char_u *result; if (iter->dit_todo == 0) return NULL; while (HASHITEM_EMPTY(iter->dit_hi)) ++iter->dit_hi; di = HI2DI(iter->dit_hi); result = di->di_key; *tv_result = &di->di_tv; --iter->dit_todo; ++iter->dit_hi; return result; } /* * Add a dict entry to dictionary "d". * Returns FAIL when out of memory and when key already exists. */ int dict_add_dict(dict_T *d, char *key, dict_T *dict) { dictitem_T *item; item = dictitem_alloc((char_u *)key); if (item == NULL) return FAIL; item->di_tv.v_type = VAR_DICT; item->di_tv.vval.v_dict = dict; ++dict->dv_refcount; if (dict_add(d, item) == FAIL) { dictitem_free(item); return FAIL; } return OK; } /* * Get the number of items in a Dictionary. */ long dict_len(dict_T *d) { if (d == NULL) return 0L; return (long)d->dv_hashtab.ht_used; } /* * Find item "key[len]" in Dictionary "d". * If "len" is negative use strlen(key). * Returns NULL when not found. 
*/ dictitem_T * dict_find(dict_T *d, char_u *key, int len) { #define AKEYLEN 200 char_u buf[AKEYLEN]; char_u *akey; char_u *tofree = NULL; hashitem_T *hi; if (d == NULL) return NULL; if (len < 0) akey = key; else if (len >= AKEYLEN) { tofree = akey = vim_strnsave(key, len); if (akey == NULL) return NULL; } else { // Avoid a malloc/free by using buf[]. vim_strncpy(buf, key, len); akey = buf; } hi = hash_find(&d->dv_hashtab, akey); vim_free(tofree); if (HASHITEM_EMPTY(hi)) return NULL; return HI2DI(hi); } /* * Get a typval_T item from a dictionary and copy it into "rettv". * Returns FAIL if the entry doesn't exist or out of memory. */ int dict_get_tv(dict_T *d, char_u *key, typval_T *rettv) { dictitem_T *di; di = dict_find(d, key, -1); if (di == NULL) return FAIL; copy_tv(&di->di_tv, rettv); return OK; } /* * Get a string item from a dictionary. * When "save" is TRUE allocate memory for it. * When FALSE a shared buffer is used, can only be used once! * Returns NULL if the entry doesn't exist or out of memory. */ char_u * dict_get_string(dict_T *d, char_u *key, int save) { dictitem_T *di; char_u *s; di = dict_find(d, key, -1); if (di == NULL) return NULL; s = tv_get_string(&di->di_tv); if (save && s != NULL) s = vim_strsave(s); return s; } /* * Get a number item from a dictionary. * Returns 0 if the entry doesn't exist. */ varnumber_T dict_get_number(dict_T *d, char_u *key) { return dict_get_number_def(d, key, 0); } /* * Get a number item from a dictionary. * Returns "def" if the entry doesn't exist. */ varnumber_T dict_get_number_def(dict_T *d, char_u *key, int def) { dictitem_T *di; di = dict_find(d, key, -1); if (di == NULL) return def; return tv_get_number(&di->di_tv); } /* * Get a number item from a dictionary. * Returns 0 if the entry doesn't exist. * Give an error if the entry is not a number. 
*/ varnumber_T dict_get_number_check(dict_T *d, char_u *key) { dictitem_T *di; di = dict_find(d, key, -1); if (di == NULL) return 0; if (di->di_tv.v_type != VAR_NUMBER) { semsg(_(e_invarg2), tv_get_string(&di->di_tv)); return 0; } return tv_get_number(&di->di_tv); } /* * Return an allocated string with the string representation of a Dictionary. * May return NULL. */ char_u * dict2string(typval_T *tv, int copyID, int restore_copyID) { garray_T ga; int first = TRUE; char_u *tofree; char_u numbuf[NUMBUFLEN]; hashitem_T *hi; char_u *s; dict_T *d; int todo; if ((d = tv->vval.v_dict) == NULL) return NULL; ga_init2(&ga, (int)sizeof(char), 80); ga_append(&ga, '{'); todo = (int)d->dv_hashtab.ht_used; for (hi = d->dv_hashtab.ht_array; todo > 0 && !got_int; ++hi) { if (!HASHITEM_EMPTY(hi)) { --todo; if (first) first = FALSE; else ga_concat(&ga, (char_u *)", "); tofree = string_quote(hi->hi_key, FALSE); if (tofree != NULL) { ga_concat(&ga, tofree); vim_free(tofree); } ga_concat(&ga, (char_u *)": "); s = echo_string_core(&HI2DI(hi)->di_tv, &tofree, numbuf, copyID, FALSE, restore_copyID, TRUE); if (s != NULL) ga_concat(&ga, s); vim_free(tofree); if (s == NULL || did_echo_string_emsg) break; line_breakcheck(); } } if (todo > 0) { vim_free(ga.ga_data); return NULL; } ga_append(&ga, '}'); ga_append(&ga, NUL); return (char_u *)ga.ga_data; } /* * Get the key for #{key: val} into "tv" and advance "arg". * Return FAIL when there is no valid key. */ static int get_literal_key(char_u **arg, typval_T *tv) { char_u *p; if (!ASCII_ISALNUM(**arg) && **arg != '_' && **arg != '-') return FAIL; for (p = *arg; ASCII_ISALNUM(*p) || *p == '_' || *p == '-'; ++p) ; tv->v_type = VAR_STRING; tv->vval.v_string = vim_strnsave(*arg, p - *arg); *arg = skipwhite(p); return OK; } /* * Allocate a variable for a Dictionary and fill it from "*arg". * "*arg" points to the "{". * "literal" is TRUE for #{key: val} * Return OK or FAIL. Returns NOTDONE for {expr}. 
*/ int eval_dict(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int literal) { int evaluate = evalarg == NULL ? FALSE : evalarg->eval_flags & EVAL_EVALUATE; dict_T *d = NULL; typval_T tvkey; typval_T tv; char_u *key = NULL; dictitem_T *item; char_u *start = skipwhite(*arg + 1); char_u buf[NUMBUFLEN]; int vim9script = in_vim9script(); int had_comma; /* * First check if it's not a curly-braces thing: {expr}. * Must do this without evaluating, otherwise a function may be called * twice. Unfortunately this means we need to call eval1() twice for the * first item. * But {} is an empty Dictionary. */ if (!vim9script && *start != '}') { if (eval1(&start, &tv, NULL) == FAIL) // recursive! return FAIL; if (*start == '}') return NOTDONE; } if (evaluate) { d = dict_alloc(); if (d == NULL) return FAIL; } tvkey.v_type = VAR_UNKNOWN; tv.v_type = VAR_UNKNOWN; *arg = skipwhite_and_linebreak(*arg + 1, evalarg); while (**arg != '}' && **arg != NUL) { if ((literal ? get_literal_key(arg, &tvkey) : eval1(arg, &tvkey, evalarg)) == FAIL) // recursive! goto failret; if (**arg != ':') { if (evaluate) semsg(_(e_missing_dict_colon), *arg); clear_tv(&tvkey); goto failret; } if (evaluate) { key = tv_get_string_buf_chk(&tvkey, buf); if (key == NULL) { // "key" is NULL when tv_get_string_buf_chk() gave an errmsg clear_tv(&tvkey); goto failret; } } if (vim9script && (*arg)[1] != NUL && !VIM_ISWHITE((*arg)[1])) { semsg(_(e_white_after), ":"); clear_tv(&tvkey); goto failret; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (eval1(arg, &tv, evalarg) == FAIL) // recursive! 
{ if (evaluate) clear_tv(&tvkey); goto failret; } if (evaluate) { item = dict_find(d, key, -1); if (item != NULL) { if (evaluate) semsg(_(e_duplicate_key), key); clear_tv(&tvkey); clear_tv(&tv); goto failret; } item = dictitem_alloc(key); if (item != NULL) { item->di_tv = tv; item->di_tv.v_lock = 0; if (dict_add(d, item) == FAIL) dictitem_free(item); } } clear_tv(&tvkey); // the comma must come after the value had_comma = **arg == ','; if (had_comma) { if (vim9script && (*arg)[1] != NUL && !VIM_ISWHITE((*arg)[1])) { semsg(_(e_white_after), ","); goto failret; } *arg = skipwhite(*arg + 1); } // the "}" can be on the next line *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == '}') break; if (!had_comma) { if (evaluate) semsg(_(e_missing_dict_comma), *arg); goto failret; } } if (**arg != '}') { if (evaluate) semsg(_(e_missing_dict_end), *arg); failret: if (d != NULL) dict_free(d); return FAIL; } *arg = skipwhite(*arg + 1); if (evaluate) rettv_dict_set(rettv, d); return OK; } /* * Go over all entries in "d2" and add them to "d1". * When "action" is "error" then a duplicate key is an error. * When "action" is "force" then a duplicate key is overwritten. * Otherwise duplicate keys are ignored ("action" is "keep"). */ void dict_extend(dict_T *d1, dict_T *d2, char_u *action) { dictitem_T *di1; hashitem_T *hi2; int todo; char_u *arg_errmsg = (char_u *)N_("extend() argument"); todo = (int)d2->dv_hashtab.ht_used; for (hi2 = d2->dv_hashtab.ht_array; todo > 0; ++hi2) { if (!HASHITEM_EMPTY(hi2)) { --todo; di1 = dict_find(d1, hi2->hi_key, -1); if (d1->dv_scope != 0) { // Disallow replacing a builtin function in l: and g:. // Check the key to be valid when adding to any scope. 
if (d1->dv_scope == VAR_DEF_SCOPE && HI2DI(hi2)->di_tv.v_type == VAR_FUNC && var_check_func_name(hi2->hi_key, di1 == NULL)) break; if (!valid_varname(hi2->hi_key)) break; } if (di1 == NULL) { di1 = dictitem_copy(HI2DI(hi2)); if (di1 != NULL && dict_add(d1, di1) == FAIL) dictitem_free(di1); } else if (*action == 'e') { semsg(_("E737: Key already exists: %s"), hi2->hi_key); break; } else if (*action == 'f' && HI2DI(hi2) != di1) { if (var_check_lock(di1->di_tv.v_lock, arg_errmsg, TRUE) || var_check_ro(di1->di_flags, arg_errmsg, TRUE)) break; clear_tv(&di1->di_tv); copy_tv(&HI2DI(hi2)->di_tv, &di1->di_tv); } } } } /* * Return the dictitem that an entry in a hashtable points to. */ dictitem_T * dict_lookup(hashitem_T *hi) { return HI2DI(hi); } /* * Return TRUE when two dictionaries have exactly the same key/values. */ int dict_equal( dict_T *d1, dict_T *d2, int ic, // ignore case for strings int recursive) // TRUE when used recursively { hashitem_T *hi; dictitem_T *item2; int todo; if (d1 == d2) return TRUE; if (dict_len(d1) != dict_len(d2)) return FALSE; if (dict_len(d1) == 0) // empty and NULL dicts are considered equal return TRUE; if (d1 == NULL || d2 == NULL) return FALSE; todo = (int)d1->dv_hashtab.ht_used; for (hi = d1->dv_hashtab.ht_array; todo > 0; ++hi) { if (!HASHITEM_EMPTY(hi)) { item2 = dict_find(d2, hi->hi_key, -1); if (item2 == NULL) return FALSE; if (!tv_equal(&HI2DI(hi)->di_tv, &item2->di_tv, ic, recursive)) return FALSE; --todo; } } return TRUE; } /* * Turn a dict into a list: * "what" == 0: list of keys * "what" == 1: list of values * "what" == 2: list of items */ static void dict_list(typval_T *argvars, typval_T *rettv, int what) { list_T *l2; dictitem_T *di; hashitem_T *hi; listitem_T *li; listitem_T *li2; dict_T *d; int todo; if (argvars[0].v_type != VAR_DICT) { emsg(_(e_dictreq)); return; } if ((d = argvars[0].vval.v_dict) == NULL) return; if (rettv_list_alloc(rettv) == FAIL) return; todo = (int)d->dv_hashtab.ht_used; for (hi = 
d->dv_hashtab.ht_array; todo > 0; ++hi) { if (!HASHITEM_EMPTY(hi)) { --todo; di = HI2DI(hi); li = listitem_alloc(); if (li == NULL) break; list_append(rettv->vval.v_list, li); if (what == 0) { // keys() li->li_tv.v_type = VAR_STRING; li->li_tv.v_lock = 0; li->li_tv.vval.v_string = vim_strsave(di->di_key); } else if (what == 1) { // values() copy_tv(&di->di_tv, &li->li_tv); } else { // items() l2 = list_alloc(); li->li_tv.v_type = VAR_LIST; li->li_tv.v_lock = 0; li->li_tv.vval.v_list = l2; if (l2 == NULL) break; ++l2->lv_refcount; li2 = listitem_alloc(); if (li2 == NULL) break; list_append(l2, li2); li2->li_tv.v_type = VAR_STRING; li2->li_tv.v_lock = 0; li2->li_tv.vval.v_string = vim_strsave(di->di_key); li2 = listitem_alloc(); if (li2 == NULL) break; list_append(l2, li2); copy_tv(&di->di_tv, &li2->li_tv); } } } } /* * "items(dict)" function */ void f_items(typval_T *argvars, typval_T *rettv) { dict_list(argvars, rettv, 2); } /* * "keys()" function */ void f_keys(typval_T *argvars, typval_T *rettv) { dict_list(argvars, rettv, 0); } /* * "values(dict)" function */ void f_values(typval_T *argvars, typval_T *rettv) { dict_list(argvars, rettv, 1); } /* * Make each item in the dict readonly (not the value of the item). 
*/ void dict_set_items_ro(dict_T *di) { int todo = (int)di->dv_hashtab.ht_used; hashitem_T *hi; // Set readonly for (hi = di->dv_hashtab.ht_array; todo > 0 ; ++hi) { if (HASHITEM_EMPTY(hi)) continue; --todo; HI2DI(hi)->di_flags |= DI_FLAGS_RO | DI_FLAGS_FIX; } } /* * "has_key()" function */ void f_has_key(typval_T *argvars, typval_T *rettv) { if (argvars[0].v_type != VAR_DICT) { emsg(_(e_dictreq)); return; } if (argvars[0].vval.v_dict == NULL) return; rettv->vval.v_number = dict_find(argvars[0].vval.v_dict, tv_get_string(&argvars[1]), -1) != NULL; } /* * "remove({dict})" function */ void dict_remove(typval_T *argvars, typval_T *rettv, char_u *arg_errmsg) { dict_T *d; char_u *key; dictitem_T *di; if (argvars[2].v_type != VAR_UNKNOWN) semsg(_(e_toomanyarg), "remove()"); else if ((d = argvars[0].vval.v_dict) != NULL && !var_check_lock(d->dv_lock, arg_errmsg, TRUE)) { key = tv_get_string_chk(&argvars[1]); if (key != NULL) { di = dict_find(d, key, -1); if (di == NULL) semsg(_(e_dictkey), key); else if (!var_check_fixed(di->di_flags, arg_errmsg, TRUE) && !var_check_ro(di->di_flags, arg_errmsg, TRUE)) { *rettv = di->di_tv; init_tv(&di->di_tv); dictitem_remove(d, di); } } } } #endif // defined(FEAT_EVAL)
40947.c
/* Test case by Jakub Jelinek <jakub@redhat.com>. */ #include <locale.h> #include <stdio.h> #include <string.h> static int do_test (void) { char q[30]; char *s; setlocale (LC_ALL, ""); printf ("after setlocale (LC_ALL, \"\"): %s\n", setlocale(LC_NUMERIC, NULL)); strcpy (q, "de_DE.UTF-8"); setlocale (LC_NUMERIC, q); printf ("after setlocale (LC_NUMERIC, \"%s\"): %s\n", q, setlocale(LC_NUMERIC, NULL)); strcpy (q, "de_DE.ISO-8859-1"); s = setlocale (LC_NUMERIC, NULL); printf ("after overwriting string: %s\n", s); return strcmp (s, "de_DE.UTF-8") != 0; } #define TEST_FUNCTION do_test () #include "../test-skeleton.c"
598175.c
/* * zsmalloc memory allocator * * Copyright (C) 2011 Nitin Gupta * Copyright (C) 2012, 2013 Minchan Kim * * This code is released using a dual license strategy: BSD/GPL * You can choose the license that better fits your requirements. * * Released under the terms of 3-clause BSD License * Released under the terms of GNU General Public License Version 2.0 */ /* * Following is how we use various fields and flags of underlying * struct page(s) to form a zspage. * * Usage of struct page fields: * page->private: points to zspage * page->freelist(index): links together all component pages of a zspage * For the huge page, this is always 0, so we use this field * to store handle. * page->units: first object offset in a subpage of zspage * * Usage of struct page flags: * PG_private: identifies the first component page * PG_owner_priv_1: identifies the huge component page * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/magic.h> #include <linux/bitops.h> #include <linux/errno.h> #include <linux/highmem.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> #include <linux/cpumask.h> #include <linux/cpu.h> #include <linux/vmalloc.h> #include <linux/preempt.h> #include <linux/spinlock.h> #include <linux/shrinker.h> #include <linux/types.h> #include <linux/debugfs.h> #include <linux/zsmalloc.h> #include <linux/zpool.h> #include <linux/mount.h> #include <linux/migrate.h> #include <linux/pagemap.h> #include <linux/fs.h> #define ZSPAGE_MAGIC 0x58 /* * This must be power of 2 and greater than of equal to sizeof(link_free). * These two conditions ensure that any 'struct link_free' itself doesn't * span more than 1 page which avoids complex case of mapping 2 pages simply * to restore link_free pointer values. */ #define ZS_ALIGN 8 /* * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single) * pages. 
ZS_MAX_ZSPAGE_ORDER defines upper limit on N. */ #define ZS_MAX_ZSPAGE_ORDER 2 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) #define ZS_HANDLE_SIZE (sizeof(unsigned long)) /* * Object location (<PFN>, <obj_idx>) is encoded as * as single (unsigned long) handle value. * * Note that object index <obj_idx> starts from 0. * * This is made more complicated by various memory models and PAE. */ #ifndef MAX_POSSIBLE_PHYSMEM_BITS #ifdef MAX_PHYSMEM_BITS #define MAX_POSSIBLE_PHYSMEM_BITS MAX_PHYSMEM_BITS #else /* * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just * be PAGE_SHIFT */ #define MAX_POSSIBLE_PHYSMEM_BITS BITS_PER_LONG #endif #endif #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT) /* * Memory for allocating for handle keeps object position by * encoding <page, obj_idx> and the encoded value has a room * in least bit(ie, look at obj_to_location). * We use the bit to synchronize between object access by * user and migration. */ #define HANDLE_PIN_BIT 0 /* * Head in allocated object should have OBJ_ALLOCATED_TAG * to identify the object was allocated or not. * It's okay to add the status bit in the least bit because * header keeps handle which is 4byte-aligned address so we * have room for two bit at least. */ #define OBJ_ALLOCATED_TAG 1 #define OBJ_TAG_BITS 1 #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS) #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1) #define FULLNESS_BITS 2 #define CLASS_BITS 8 #define ISOLATED_BITS 3 #define MAGIC_VAL_BITS 8 #define MAX(a, b) ((a) >= (b) ? (a) : (b)) /* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */ #define ZS_MIN_ALLOC_SIZE \ MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS)) /* each chunk includes extra space to keep handle */ #define ZS_MAX_ALLOC_SIZE PAGE_SIZE /* * On systems with 4K page size, this gives 255 size classes! 
There is a * trader-off here: * - Large number of size classes is potentially wasteful as free page are * spread across these classes * - Small number of size classes causes large internal fragmentation * - Probably its better to use specific size classes (empirically * determined). NOTE: all those class sizes must be set as multiple of * ZS_ALIGN to make sure link_free itself never has to span 2 pages. * * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN * (reason above) */ #define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS) #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \ ZS_SIZE_CLASS_DELTA) + 1) enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL, NR_ZS_FULLNESS, }; enum zs_stat_type { CLASS_EMPTY, CLASS_ALMOST_EMPTY, CLASS_ALMOST_FULL, CLASS_FULL, OBJ_ALLOCATED, OBJ_USED, NR_ZS_STAT_TYPE, }; struct zs_size_stat { unsigned long objs[NR_ZS_STAT_TYPE]; }; #ifdef CONFIG_ZSMALLOC_STAT static struct dentry *zs_stat_root; #endif #ifdef CONFIG_COMPACTION static struct vfsmount *zsmalloc_mnt; #endif /* * We assign a page to ZS_ALMOST_EMPTY fullness group when: * n <= N / f, where * n = number of allocated objects * N = total number of objects zspage can store * f = fullness_threshold_frac * * Similarly, we assign zspage to: * ZS_ALMOST_FULL when n > N / f * ZS_EMPTY when n == 0 * ZS_FULL when n == N * * (see: fix_fullness_group()) */ static const int fullness_threshold_frac = 4; static size_t huge_class_size; struct size_class { spinlock_t lock; struct list_head fullness_list[NR_ZS_FULLNESS]; /* * Size of objects stored in this class. Must be multiple * of ZS_ALIGN. 
 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
	unsigned int index;
	struct zs_size_stat stats;
};

/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetPageHugeObject(struct page *page)
{
	SetPageOwnerPriv1(page);
}

static void ClearPageHugeObject(struct page *page)
{
	ClearPageOwnerPriv1(page);
}

static int PageHugeObject(struct page *page)
{
	return PageOwnerPriv1(page);
}

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

/* One zsmalloc pool: all size classes plus the metadata slab caches. */
struct zs_pool {
	const char *name;

	struct size_class *size_class[ZS_SIZE_CLASSES];
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct inode *inode;
	struct work_struct free_work;
#endif
};

/* Metadata describing one zspage (a chain of physical pages). */
struct zspage {
	struct {
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS + 1;
		unsigned int isolated:ISOLATED_BITS;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct page *first_page;
	struct list_head list; /* fullness list */
#ifdef CONFIG_COMPACTION
	rwlock_t lock;
#endif
};

/* Per-CPU scratch state used to map objects that may span two pages. */
struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

#ifdef CONFIG_COMPACTION
static int zs_register_migration(struct zs_pool *pool);
static void zs_unregister_migration(struct zs_pool *pool);
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
/* No-op stubs when page migration support is compiled out. */
static int zsmalloc_mount(void) { return 0; }
static void zsmalloc_unmount(void) {}
static int zs_register_migration(struct zs_pool *pool) { return 0; }
static void zs_unregister_migration(struct zs_pool *pool) {}
static void migrate_lock_init(struct zspage *zspage) {}
static void migrate_read_lock(struct zspage *zspage) {}
static void migrate_read_unlock(struct zspage *zspage) {}
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif

/*
 * Create the slab caches backing handle and zspage metadata allocations.
 * Returns 0 on success, 1 on failure (note: not a -errno value).
 */
static int create_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	if (!pool->handle_cachep)
		return 1;

	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					0, 0, NULL);
	if (!pool->zspage_cachep) {
		kmem_cache_destroy(pool->handle_cachep);
		pool->handle_cachep = NULL;
		return 1;
	}

	return 0;
}

static void destroy_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
	kmem_cache_destroy(pool->zspage_cachep);
}

/* Handle memory comes from the slab cache; highmem/movable flags are masked off. */
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
	return kmem_cache_alloc(pool->zspage_cachep,
			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	kmem_cache_free(pool->zspage_cachep, zspage);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	/*
	 * lsb of @obj represents handle lock while other bits
	 * represent object value the handle is pointing so
	 * updating shouldn't do store tearing.
	 */
	WRITE_ONCE(*(unsigned long *)handle, obj);
}

/* zpool driver */
#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(const char *name, gfp_t gfp,
			     const struct zpool_ops *zpool_ops,
			     struct zpool *zpool)
{
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);
	return *handle ? 0 : -1;
}

static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

/* Translate the zpool mapping mode to zsmalloc's and map the object. */
static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}

static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type = "zsmalloc",
	.owner = THIS_MODULE,
	.create = zs_zpool_create,
	.destroy = zs_zpool_destroy,
	.malloc = zs_zpool_malloc,
	.free = zs_zpool_free,
	.map = zs_zpool_map,
	.unmap = zs_zpool_unmap,
	.total_size = zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static bool is_zspage_isolated(struct zspage *zspage)
{
	return zspage->isolated;
}

static __maybe_unused int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
	return zspage->inuse;
}

static inline void set_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse = val;
}

static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
	zspage->inuse += val;
}

static inline struct page *get_first_page(struct zspage *zspage)
{
	struct page *first_page = zspage->first_page;

	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
	return first_page;
}

/* The first-object offset within a page is stashed in page->units. */
static inline int get_first_obj_offset(struct page *page)
{
	return page->units;
}

static inline void set_first_obj_offset(struct page *page, int offset)
{
	page->units = offset;
}

static inline unsigned int get_freeobj(struct zspage *zspage)
{
	return zspage->freeobj;
}

static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
{
	zspage->freeobj = obj;
}

/* Read back the (class index, fullness group) packed into the zspage. */
static void get_zspage_mapping(struct zspage *zspage,
				unsigned int *class_idx,
				enum fullness_group *fullness)
{
	BUG_ON(zspage->magic != ZSPAGE_MAGIC);

	*fullness = zspage->fullness;
	*class_idx = zspage->class;
}

static void set_zspage_mapping(struct zspage *zspage,
				unsigned int class_idx,
				enum fullness_group fullness)
{
	zspage->class = class_idx;
	zspage->fullness = fullness;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
}

/* type can be of enum type zs_stat_type or fullness_group */
static inline void zs_stat_inc(struct size_class *class,
				int type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

/* type can be of enum type zs_stat_type or fullness_group */
static inline void zs_stat_dec(struct size_class *class,
				int type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

/* type can be of enum type zs_stat_type or fullness_group */
static inline unsigned long zs_stat_get(struct size_class *class, int type)
{
	return class->stats.objs[type];
}

#ifdef CONFIG_ZSMALLOC_STAT

static void __init zs_stat_init(void)
{
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static unsigned long zs_can_compact(struct size_class *class);

/*
 * debugfs seq_file handler: prints one row of usage statistics per size
 * class (skipping merged classes whose ->index differs) plus a totals row.
 */
static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used, freeable;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
	unsigned long total_freeable = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage", "freeable");

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		freeable = zs_can_compact(class);
		spin_unlock(&class->lock);

		objs_per_zspage = class->objs_per_zspage;
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
				" %10lu %10lu %16d %8lu\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage, freeable);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
		total_freeable += freeable;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages, "", total_freeable);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(zs_stats_size);

/*
 * Create <debugfs>/zsmalloc/<name>/classes; on partial failure the
 * pool's stat directory is removed again and stat_dentry reset.
 */
static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
	struct dentry *entry;

	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
		return;
	}

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | 0444,
			pool->stat_dentry, pool,
			&zs_stats_size_fops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		debugfs_remove_recursive(pool->stat_dentry);
		pool->stat_dentry = NULL;
	}
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void)
{
}

static void __exit zs_stat_exit(void)
{
}

static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif

/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int inuse, objs_per_zspage;
	enum fullness_group fg;

	inuse = get_zspage_inuse(zspage);
	objs_per_zspage = class->objs_per_zspage;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == objs_per_zspage)
		fg = ZS_FULL;
	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	struct zspage *head;

	zs_stat_inc(class, fullness, 1);
	head = list_first_entry_or_null(&class->fullness_list[fullness],
					struct zspage, list);
	/*
	 * We want to see more ZS_FULL pages and less almost empty/full.
	 * Put pages with higher ->inuse first.
	 */
	if (head) {
		if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
			list_add(&zspage->list, &head->list);
			return;
		}
	}
	list_add(&zspage->list, &class->fullness_list[fullness]);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class,
				struct zspage *zspage,
				enum fullness_group fullness)
{
	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
	VM_BUG_ON(is_zspage_isolated(zspage));

	list_del_init(&zspage->list);
	zs_stat_dec(class, fullness, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct zspage *zspage)
{
	int class_idx;
	enum fullness_group currfg, newfg;

	get_zspage_mapping(zspage, &class_idx, &currfg);
	newfg = get_fullness_group(class, zspage);
	if (newfg == currfg)
		goto out;

	/* Isolated zspages are off the freelists; only update the mapping. */
	if (!is_zspage_isolated(zspage)) {
		remove_zspage(class, zspage, currfg);
		insert_zspage(class, zspage, newfg);
	}

	set_zspage_mapping(zspage, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 *	usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

/* page->private points at the owning zspage; BUG if the magic does not match. */
static struct zspage *get_zspage(struct page *page)
{
	struct zspage *zspage = (struct zspage *)page->private;

	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
	return zspage;
}

/* Next page in the zspage chain, or NULL for huge-object pages. */
static struct page *get_next_page(struct page *page)
{
	if (unlikely(PageHugeObject(page)))
		return NULL;

	return page->freelist;
}

/**
 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
 * @obj: the encoded object value
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned int *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

/**
 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
	unsigned long obj;

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= obj_idx & OBJ_INDEX_MASK;
	obj <<= OBJ_TAG_BITS;

	return obj;
}

/* A handle is a pointer to the slot holding the encoded object value. */
static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

/*
 * Read an object's header word: huge objects keep it in the first page's
 * ->index, all others store it inline at the start of the chunk.
 */
static unsigned long obj_to_head(struct page *page, void *obj)
{
	if (unlikely(PageHugeObject(page))) {
		VM_BUG_ON_PAGE(!is_first_page(page), page);
		return page->index;
	} else
		return *(unsigned long *)obj;
}

/* Bit-spinlock primitives on HANDLE_PIN_BIT of the handle word. */
static inline int testpin_tag(unsigned long handle)
{
	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static inline int trypin_tag(unsigned long handle)
{
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void pin_tag(unsigned long handle)
{
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static void unpin_tag(unsigned long handle)
{
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

/* Strip all zsmalloc state from a page before it is freed. */
static void reset_page(struct page *page)
{
	__ClearPageMovable(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_mapcount_reset(page);
	ClearPageHugeObject(page);
	page->freelist = NULL;
}

/*
 * Try to lock every page of the zspage; on failure unlock the pages
 * already taken and return 0.
 */
static int trylock_zspage(struct zspage *zspage)
{
	struct page *cursor, *fail;

	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
					get_next_page(cursor)) {
		if (!trylock_page(cursor)) {
			fail = cursor;
			goto unlock;
		}
	}

	return 1;
unlock:
	for (cursor = get_first_page(zspage); cursor != fail; cursor =
					get_next_page(cursor))
		unlock_page(cursor);

	return 0;
}

/*
 * Release an empty zspage: return each page to the page allocator and
 * free the metadata. Caller holds class->lock and all page locks.
 */
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	struct page *page, *next;
	enum fullness_group fg;
	unsigned int class_idx;

	get_zspage_mapping(zspage, &class_idx, &fg);

	assert_spin_locked(&class->lock);

	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(fg != ZS_EMPTY);

	next = page = get_first_page(zspage);
	do {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		next = get_next_page(page);
		reset_page(page);
		unlock_page(page);
		dec_zone_page_state(page, NR_ZSPAGES);
		put_page(page);
		page = next;
	} while (page != NULL);

	cache_free_zspage(pool, zspage);

	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
	atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
}

/*
 * Free an empty zspage; if its page locks cannot all be taken right now,
 * defer the free to the workqueue instead.
 */
static void free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	if (!trylock_zspage(zspage)) {
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage, ZS_EMPTY);
	__free_zspage(pool, class, zspage);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct size_class *class, struct zspage *zspage)
{
	unsigned int freeobj = 1;
	unsigned long off = 0;
	struct page *page = get_first_page(zspage);

	while (page) {
		struct page *next_page;
		struct link_free *link;
		void *vaddr;

		set_first_obj_offset(page, off);

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		/* chain every whole object slot on this page into the freelist */
		while ((off += class->size) < PAGE_SIZE) {
			link->next = freeobj++ << OBJ_TAG_BITS;
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		if (next_page) {
			link->next = freeobj++ << OBJ_TAG_BITS;
		} else {
			/*
			 * Reset OBJ_TAG_BITS bit to last link to tell
			 * whether it's allocated object or not.
			 */
			link->next = -1UL << OBJ_TAG_BITS;
		}
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}

	set_freeobj(zspage, 0);
}

static void create_page_chain(struct size_class *class, struct zspage *zspage,
				struct page *pages[])
{
	int i;
	struct page *page;
	struct page *prev_page = NULL;
	int nr_pages = class->pages_per_zspage;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. all pages are linked together using page->freelist
	 * 2. each sub-page points to zspage using page->private
	 *
	 * we set PG_private to identify the first page (i.e. no other sub-page
	 * has this flag set).
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
		set_page_private(page, (unsigned long)zspage);
		page->freelist = NULL;
		if (i == 0) {
			zspage->first_page = page;
			SetPagePrivate(page);
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetPageHugeObject(page);
		} else {
			prev_page->freelist = page;
		}
		prev_page = page;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct zspage *alloc_zspage(struct zs_pool *pool,
					struct size_class *class,
					gfp_t gfp)
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	memset(zspage, 0, sizeof(struct zspage));
	zspage->magic = ZSPAGE_MAGIC;
	migrate_lock_init(zspage);

	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(gfp);
		if (!page) {
			/* unwind: release the pages already allocated */
			while (--i >= 0) {
				dec_zone_page_state(pages[i], NR_ZSPAGES);
				__free_page(pages[i]);
			}
			cache_free_zspage(pool, zspage);
			return NULL;
		}

		inc_zone_page_state(page, NR_ZSPAGES);
		pages[i] = page;
	}

	create_page_chain(class, zspage, pages);
	init_zspage(class, zspage);

	return zspage;
}

/* Pick a zspage with free slots, preferring the fullest group first. */
static struct zspage *find_get_zspage(struct size_class *class)
{
	int i;
	struct zspage *zspage;

	for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
		zspage = list_first_entry_or_null(&class->fullness_list[i],
				struct zspage, list);
		if (zspage)
			break;
	}

	return zspage;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

/* Map the two pages contiguously into the per-cpu VM area. */
static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

/* Copy a page-spanning object into the per-cpu buffer for reading. */
static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

/* Write the per-cpu buffer back to the page-spanning object. */
static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	/* the handle header at the front of the chunk is not written back */
	buf = area->vm_buf;
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */
/* CPU hotplug callback: set up this CPU's mapping area. */
static int zs_cpu_prepare(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	return __zs_cpu_up(area);
}

/* CPU hotplug callback: tear down this CPU's mapping area. */
static int zs_cpu_dead(unsigned int cpu)
{
	struct mapping_area *area;

	area = &per_cpu(zs_map_area, cpu);
	__zs_cpu_down(area);
	return 0;
}

/* Two size classes can share an entry if their zspage geometry is identical. */
static bool can_merge(struct size_class *prev, int pages_per_zspage,
					int objs_per_zspage)
{
	if (prev->pages_per_zspage == pages_per_zspage &&
		prev->objs_per_zspage == objs_per_zspage)
		return true;

	return false;
}

/* True when every object slot in the zspage is in use. */
static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 * @mm: mapping mode to use
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another users mappings.
	 */
	BUG_ON(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);

	/* migration cannot move any subpage in this zspage */
	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	/* skip the embedded handle header, except for huge objects */
	if (likely(!PageHugeObject(page)))
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *page;
	unsigned long obj, off;
	unsigned int obj_idx;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = (class->size * obj_idx) & ~PAGE_MASK;

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);

	migrate_read_unlock(zspage);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

/**
 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
 *                        zsmalloc &size_class.
 * @pool: zsmalloc pool to use
 *
 * The function returns the size of the first huge class - any object of equal
 * or bigger size will be stored in zspage consisting of a single physical
 * page.
 *
 * Context: Any context.
 *
 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
 */
size_t zs_huge_class_size(struct zs_pool *pool)
{
	return huge_class_size;
}
EXPORT_SYMBOL_GPL(zs_huge_class_size);

/*
 * Carve the next free object out of @zspage and record @handle in it;
 * returns the encoded object value. Called with class->lock held.
 */
static unsigned long obj_malloc(struct size_class *class,
				struct zspage *zspage, unsigned long handle)
{
	int i, nr_page, offset;
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = get_freeobj(zspage);

	offset = obj * class->size;
	nr_page = offset >> PAGE_SHIFT;
	m_offset = offset & ~PAGE_MASK;
	m_page = get_first_page(zspage);

	for (i = 0; i < nr_page; i++)
		m_page = get_next_page(m_page);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
	if (likely(!PageHugeObject(m_page)))
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle to page->index */
		zspage->first_page->index = handle;

	kunmap_atomic(vaddr);
	mod_zspage_inuse(zspage, 1);
	zs_stat_inc(class, OBJ_USED, 1);

	obj = location_to_obj(m_page, obj);

	return obj;
}

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 * @gfp: gfp flags when allocating object
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long handle, obj;
	struct size_class *class;
	enum fullness_group newfg;
	struct zspage *zspage;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	zspage = find_get_zspage(class);
	if (likely(zspage)) {
		obj = obj_malloc(class, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		record_obj(handle, obj);
		spin_unlock(&class->lock);

		return handle;
	}

	/* no zspage with room; allocate a fresh one outside the lock */
	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		cache_free_handle(pool, handle);
		return 0;
	}

	spin_lock(&class->lock);
	obj = obj_malloc(class, zspage, handle);
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class->index, newfg);
	record_obj(handle, obj);
	atomic_long_add(class->pages_per_zspage,
				&pool->pages_allocated);
	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);

	/* We completely set up zspage so mark them as movable */
	SetZsPageMovable(pool, zspage);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);

/* Return an object to its zspage's freelist. Called with class->lock held. */
static void obj_free(struct size_class *class, unsigned long obj)
{
	struct link_free *link;
	struct zspage *zspage;
	struct page *f_page;
	unsigned long f_offset;
	unsigned int f_objidx;
	void *vaddr;

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
	zspage = get_zspage(f_page);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
	kunmap_atomic(vaddr);
	set_freeobj(zspage, f_objidx);
	mod_zspage_inuse(zspage, -1);
	zs_stat_dec(class, OBJ_USED, 1);
}
/*
 * zs_free - free the object that @handle refers to back into @pool.
 * @pool: pool the object was allocated from
 * @handle: handle previously returned by zs_malloc()
 *
 * Frees the underlying object, re-evaluates the zspage's fullness group
 * and, when the zspage becomes empty (and is not currently isolated for
 * page migration), releases the whole zspage.
 */
void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct zspage *zspage;
	struct page *f_page;
	unsigned long obj;
	unsigned int f_objidx;
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;
	bool isolated;

	if (unlikely(!handle))
		return;

	/* Pin the handle so a concurrent compactor cannot relocate it. */
	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	zspage = get_zspage(f_page);

	migrate_read_lock(zspage);

	get_zspage_mapping(zspage, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(class, obj);
	fullness = fix_fullness_group(class, zspage);
	if (fullness != ZS_EMPTY) {
		migrate_read_unlock(zspage);
		goto out;
	}

	isolated = is_zspage_isolated(zspage);
	migrate_read_unlock(zspage);
	/* If zspage is isolated, zs_page_putback will free the zspage */
	if (likely(!isolated))
		free_zspage(pool, class, zspage);
out:
	spin_unlock(&class->lock);
	unpin_tag(handle);
	cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);

/*
 * Copy one object's payload from @src to @dst. An object may straddle
 * the boundary between two subpages of a zspage, hence the piecewise
 * copy that remaps source/destination pages as either offset crosses
 * PAGE_SIZE.
 */
static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
{
	struct page *s_page, *d_page;
	unsigned int s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = (class->size * s_objidx) & ~PAGE_MASK;
	d_off = (class->size * d_objidx) & ~PAGE_MASK;

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		/*
		 * Source offset ran off the page: unmap both (kmap_atomic
		 * mappings must be released in reverse order), advance to
		 * the next source subpage and remap.
		 */
		if (s_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

		/* Same for the destination subpage. */
		if (d_off >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

/*
 * Find alloced object in zspage from index object and
 * return handle.
 */
static unsigned long find_alloced_obj(struct size_class *class,
					struct page *page, int *obj_idx)
{
	unsigned long head;
	int offset = 0;
	int index = *obj_idx;
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

	offset = get_first_obj_offset(page);
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
		head = obj_to_head(page, addr + offset);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			/* Pin it; if someone else holds the pin, keep scanning. */
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);

	/* Report how far we scanned so the caller can resume here. */
	*obj_idx = index;

	return handle;
}

struct zs_compact_control {
	/* Source spage for migration which could be a subpage of zspage */
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	/* Starting object index within @s_page which used for live object
	 * in the subpage. */
	int obj_idx;
};

/*
 * Move live objects from cc->s_page into the zspage of cc->d_page.
 * Returns 0 when the source ran out of pages, or -ENOMEM when the
 * destination zspage filled up first. Progress is recorded back into
 * @cc so the caller can continue with a new destination.
 */
static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	int obj_idx = cc->obj_idx;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(class, s_page, &obj_idx);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			obj_idx = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(class, get_zspage(d_page))) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(class, get_zspage(d_page), handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		/*
		 * record_obj updates handle's value to free_obj and it will
		 * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
		 * breaks synchronization using pin_tag(e,g, zs_free) so
		 * let's keep the lock bit.
		 */
		free_obj |= BIT(HANDLE_PIN_BIT);
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(class, used_obj);
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->obj_idx = obj_idx;

	return ret;
}

/*
 * Detach and return a zspage from the class's fullness lists.
 * @source selects the preference order: compaction sources are taken
 * ALMOST_EMPTY first, destinations ALMOST_FULL first.
 */
static struct zspage *isolate_zspage(struct size_class *class, bool source)
{
	int i;
	struct zspage *zspage;
	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};

	if (!source) {
		fg[0] = ZS_ALMOST_FULL;
		fg[1] = ZS_ALMOST_EMPTY;
	}

	for (i = 0; i < 2; i++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
							struct zspage, list);
		if (zspage) {
			VM_BUG_ON(is_zspage_isolated(zspage));
			remove_zspage(class, zspage, fg[i]);
			return zspage;
		}
	}

	/* Both lists empty: zspage is NULL here. */
	return zspage;
}

/*
 * putback_zspage - add @zspage into right class's fullness list
 * @class: destination class
 * @zspage: target page
 *
 * Return @zspage's fullness_group
 */
static enum fullness_group putback_zspage(struct size_class *class,
			struct zspage *zspage)
{
	enum fullness_group fullness;

	VM_BUG_ON(is_zspage_isolated(zspage));

	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);

	return fullness;
}

#ifdef CONFIG_COMPACTION
/*
 * To prevent zspage destroy during migration, zspage freeing should
 * hold locks of all pages in the zspage.
 */
static void lock_zspage(struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		lock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}

/* Mount callback for the pseudo filesystem backing zsmalloc inodes. */
static struct dentry *zs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC);
}

static struct file_system_type zsmalloc_fs = {
	.name		= "zsmalloc",
	.mount		= zs_mount,
	.kill_sb	= kill_anon_super,
};

/* Mount the internal pseudo fs; its inodes carry the a_ops for migration. */
static int zsmalloc_mount(void)
{
	int ret = 0;

	zsmalloc_mnt = kern_mount(&zsmalloc_fs);
	if (IS_ERR(zsmalloc_mnt))
		ret = PTR_ERR(zsmalloc_mnt);

	return ret;
}

static void zsmalloc_unmount(void)
{
	kern_unmount(zsmalloc_mnt);
}

/* Per-zspage rwlock: readers are object ops, writer is page migration. */
static void migrate_lock_init(struct zspage *zspage)
{
	rwlock_init(&zspage->lock);
}

static void migrate_read_lock(struct zspage *zspage)
{
	read_lock(&zspage->lock);
}

static void migrate_read_unlock(struct zspage *zspage)
{
	read_unlock(&zspage->lock);
}

static void migrate_write_lock(struct zspage *zspage)
{
	write_lock(&zspage->lock);
}

static void migrate_write_unlock(struct zspage *zspage)
{
	write_unlock(&zspage->lock);
}

/* Number of isolated subpage for *page migration* in this zspage */
static void inc_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated++;
}

static void dec_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated--;
}

/*
 * Rebuild the zspage's page chain with @newpage substituted for
 * @oldpage, carrying over the first-object offset and (for huge
 * objects) the handle stored in page->index.
 */
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
				struct page *newpage, struct page *oldpage)
{
	struct page *page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	int idx = 0;

	page = get_first_page(zspage);
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
	} while ((page = get_next_page(page)) != NULL);

	create_page_chain(class, zspage, pages);
	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
	if (unlikely(PageHugeObject(oldpage)))
		newpage->index = oldpage->index;
	__SetPageMovable(newpage, page_mapping(oldpage));
}

/* address_space_operations: mark one subpage isolated for migration. */
static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct address_space *mapping;

	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	zspage = get_zspage(page);

	/*
	 * Without class lock, fullness could be stale while class_idx is okay
	 * because class_idx is constant unless page is freed so we should get
	 * fullness again under class lock.
	 */
	get_zspage_mapping(zspage, &class_idx, &fullness);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	if (get_zspage_inuse(zspage) == 0) {
		spin_unlock(&class->lock);
		return false;
	}

	/* zspage is isolated for object migration */
	if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		spin_unlock(&class->lock);
		return false;
	}

	/*
	 * If this is first time isolation for the zspage, isolate zspage from
	 * size_class to prevent further object allocation from the zspage.
	 */
	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		get_zspage_mapping(zspage, &class_idx, &fullness);
		remove_zspage(class, zspage, fullness);
	}

	inc_zspage_isolation(zspage);
	spin_unlock(&class->lock);

	return true;
}

/*
 * address_space_operations: migrate one subpage to @newpage. Pins all
 * live objects first, copies the page, rewrites every affected handle
 * to point at the new location, then swaps the page into the chain.
 */
static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct page *dummy;
	void *s_addr, *d_addr, *addr;
	int offset, pos;
	unsigned long handle, head;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;
	int ret = -EAGAIN;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the zs lock, which does not work with
	 * MIGRATE_SYNC_NO_COPY workflow.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);

	/* Concurrent compactor cannot migrate any subpage in zspage */
	migrate_write_lock(zspage);
	get_zspage_mapping(zspage, &class_idx, &fullness);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];
	offset = get_first_obj_offset(page);

	spin_lock(&class->lock);
	if (!get_zspage_inuse(zspage)) {
		/*
		 * Set "offset" to end of the page so that every loops
		 * skips unnecessary object scanning.
		 */
		offset = PAGE_SIZE;
	}

	pos = offset;
	s_addr = kmap_atomic(page);
	while (pos < PAGE_SIZE) {
		head = obj_to_head(page, s_addr + pos);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!trypin_tag(handle))
				goto unpin_objects;
		}
		pos += class->size;
	}

	/*
	 * Here, any user cannot access all objects in the zspage so let's move.
	 */
	d_addr = kmap_atomic(newpage);
	memcpy(d_addr, s_addr, PAGE_SIZE);
	kunmap_atomic(d_addr);

	for (addr = s_addr + offset; addr < s_addr + pos;
					addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newpage,
								obj_idx);
			/* Keep the pin bit; unpin_objects below drops it. */
			new_obj |= BIT(HANDLE_PIN_BIT);
			record_obj(handle, new_obj);
		}
	}

	replace_sub_page(class, zspage, newpage, page);
	get_page(newpage);

	dec_zspage_isolation(zspage);

	/*
	 * Page migration is done so let's putback isolated zspage to
	 * the list if @page is final isolated subpage in the zspage.
	 */
	if (!is_zspage_isolated(zspage))
		putback_zspage(class, zspage);

	reset_page(page);
	put_page(page);
	page = newpage;

	ret = MIGRATEPAGE_SUCCESS;
unpin_objects:
	/* On success @page is @newpage here, so this unpins the new copies. */
	for (addr = s_addr + offset; addr < s_addr + pos;
						addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();
			unpin_tag(handle);
		}
	}
	kunmap_atomic(s_addr);

	spin_unlock(&class->lock);
	migrate_write_unlock(zspage);

	return ret;
}

/* address_space_operations: undo isolation after a failed migration. */
static void zs_page_putback(struct page *page)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fg;
	struct address_space *mapping;
	struct zspage *zspage;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	dec_zspage_isolation(zspage);
	if (!is_zspage_isolated(zspage)) {
		fg = putback_zspage(class, zspage);
		/*
		 * Due to page_lock, we cannot free zspage immediately
		 * so let's defer.
		 */
		if (fg == ZS_EMPTY)
			schedule_work(&pool->free_work);
	}
	spin_unlock(&class->lock);
}

static const struct address_space_operations zsmalloc_aops = {
	.isolate_page = zs_page_isolate,
	.migratepage = zs_page_migrate,
	.putback_page = zs_page_putback,
};

/*
 * Allocate the anonymous inode whose mapping carries zsmalloc_aops and
 * a back-pointer to @pool. Returns 0 on success, 1 on failure.
 */
static int zs_register_migration(struct zs_pool *pool)
{
	pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &zsmalloc_aops;
	return 0;
}

static void zs_unregister_migration(struct zs_pool *pool)
{
	flush_work(&pool->free_work);
	iput(pool->inode);
}

/*
 * Caller should hold page_lock of all pages in the zspage
 * In here, we cannot use zspage meta data.
 */
static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
	unsigned int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
					free_work);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];
		/* Skip merged classes: only the owning index is processed. */
		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
		spin_unlock(&class->lock);
	}

	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

		get_zspage_mapping(zspage, &class_idx, &fullness);
		VM_BUG_ON(fullness != ZS_EMPTY);
		class = pool->size_class[class_idx];
		spin_lock(&class->lock);
		__free_zspage(pool, pool->size_class[class_idx], zspage);
		spin_unlock(&class->lock);
	}
};

static void kick_deferred_free(struct zs_pool *pool)
{
	schedule_work(&pool->free_work);
}

static void init_deferred_free(struct zs_pool *pool)
{
	INIT_WORK(&pool->free_work, async_free_zspage);
}

/* Mark every subpage of a fully set up zspage as movable. */
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}
#endif

/*
 *
 * Based on the number of unused allocated objects calculate
 * and return the number of pages that we can free.
 */
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
	unsigned long obj_used = zs_stat_get(class, OBJ_USED);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= class->objs_per_zspage;

	return obj_wasted * class->pages_per_zspage;
}

/*
 * Compact one size class: repeatedly drain almost-empty source zspages
 * into almost-full destinations, freeing sources that become empty.
 * Drops the class lock between rounds to allow rescheduling.
 */
static void __zs_compact(struct zs_pool *pool, struct size_class *class)
{
	struct zs_compact_control cc;
	struct zspage *src_zspage;
	struct zspage *dst_zspage = NULL;

	spin_lock(&class->lock);
	while ((src_zspage = isolate_zspage(class, true))) {

		if (!zs_can_compact(class))
			break;

		cc.obj_idx = 0;
		cc.s_page = get_first_page(src_zspage);

		while ((dst_zspage = isolate_zspage(class, false))) {
			cc.d_page = get_first_page(dst_zspage);
			/*
			 * If there is no more space in dst_page, resched
			 * and see if anyone had allocated another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(class, dst_zspage);
		}

		/* Stop if we couldn't find slot */
		if (dst_zspage == NULL)
			break;

		putback_zspage(class, dst_zspage);
		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
			free_zspage(pool, class, src_zspage);
			pool->stats.pages_compacted += class->pages_per_zspage;
		}
		spin_unlock(&class->lock);
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_zspage)
		putback_zspage(class, src_zspage);

	spin_unlock(&class->lock);
}

/* Compact every class; returns the cumulative pages_compacted counter. */
unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		__zs_compact(pool, class);
	}

	return pool->stats.pages_compacted;
}
EXPORT_SYMBOL_GPL(zs_compact);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);

/* Shrinker scan: run compaction and report how many pages it released. */
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	pages_freed = pool->stats.pages_compacted;
	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool) - pages_freed;

	return pages_freed ? pages_freed : SHRINK_STOP;
}

/* Shrinker count: upper bound of pages compaction could free right now. */
static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
	unregister_shrinker(&pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker.scan_objects = zs_shrinker_scan;
	pool->shrinker.count_objects = zs_shrinker_count;
	pool->shrinker.batch = 0;
	pool->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&pool->shrinker);
}

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate reversely, because, size of size_class that we want to use
	 * for merging should be larger or equal to current size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness = 0;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * We iterate from biggest down to smallest classes,
		 * so huge_class_size holds the size of the first huge
		 * class. Any object bigger than or equal to that will
		 * endup in the huge class.
		 */
		if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
				!huge_class_size) {
			huge_class_size = size;
			/*
			 * The object uses ZS_HANDLE_SIZE bytes to store the
			 * handle. We need to subtract it, because zs_malloc()
			 * unconditionally adds handle size before it performs
			 * size class search - so object may be smaller than
			 * huge class size, yet it still can end up in the huge
			 * class because it grows by ZS_HANDLE_SIZE extra bytes
			 * right before class lookup.
			 */
			huge_class_size -= (ZS_HANDLE_SIZE - 1);
		}

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_class have same
		 * characteristics. So, we makes size_class point to
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
							fullness++)
			INIT_LIST_HEAD(&class->fullness_list[fullness]);

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	if (zs_register_migration(pool))
		goto err;

	/*
	 * Not critical since shrinker is only used to trigger internal
	 * defragmentation of the pool which is pretty optional thing. If
	 * registration fails we still can use the pool normally and user can
	 * trigger compaction manually. Thus, ignore return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

/* Tear down a pool created by zs_create_pool(). */
void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_unregister_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		/* Merged classes are freed once, via their owning index. */
		if (class->index != i)
			continue;

		for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
			if (!list_empty(&class->fullness_list[fg])) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/* Module init: mount the pseudo fs, register CPU hotplug and zpool hooks. */
static int __init zs_init(void)
{
	int ret;

	ret = zsmalloc_mount();
	if (ret)
		goto out;

	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
				zs_cpu_prepare, zs_cpu_dead);
	if (ret)
		goto hp_setup_fail;

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

hp_setup_fail:
	zsmalloc_unmount();
out:
	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zsmalloc_unmount();
	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
537937.c
// io.c #include <kernel.h> int ioInit(void) { int Status = 0; debug_print ("ioInit: [TODO]\n"); // ... return (int) Status; } //========================================== // This is called by ioctl() in ring3. // OK Isso eh um wrapper. // Chamaremos tty_ioctl() ou outros ... // ... // See: // http://man7.org/linux/man-pages/man2/ioctl.2.html // https://en.wikipedia.org/wiki/Ioctl // The ioctl() system call manipulates the // underlying device parameters of special files. // In particular, many operating characteristics of // character special files (e.g., terminals) may be controlled with // ioctl() requests. The argument fd must be an open file descriptor. // return: // EBADF fd is not a valid file descriptor. // EFAULT argp references an inaccessible memory area. // EINVAL request or argp is not valid. // ENOTTY fd is not associated with a character special device. // ENOTTY // The specified request does not apply to the kind of object // that the file descriptor fd references // See: // https://man7.org/linux/man-pages/man2/ioctl.2.html // #bugbug // arg is the address for the arguments. // We are using argument not as an address sometimes. // it depends on the request number. // Called by sys_ioctl() in sys.c // But this routine can be called by the routines inside the kernel. // pega o arquivo. // checa o tipo de objeto. // Isso deve ser usado principalmente com dispositivos // de caracteres como o terminal. // #todo // check file structure validation. // The TIOCSTI (terminal I/O control, // simulate terminal input) ioctl // function can push a character into a device stream // ENOTTY - "Not a typewriter" // #todo // Now we can use a swit to call different // functions, as tty_ioctl etc. int io_ioctl ( int fd, unsigned long request, unsigned long arg ) { file *f; int ObjectType = -1; debug_print ("io_ioctl: [TODO]\n"); if ( fd < 0 || fd >= OPEN_MAX ) { return (int) (-EBADF); } // Get file pointer. 
f = (file *) get_file_from_fd(fd); if ( (void *) f == NULL ) { debug_print("io_ioctl: [FAIL] f\n"); return -1; } // #todo // Check validation if( f->magic != 1234 ){ return -1; } // Object types. // #todo: // What type of file we will support here? ObjectType = (int) f->____object; switch (ObjectType){ // Pode isso ?? // Normal file ??? // See: lib/kstdio.c case ObjectTypeFile: debug_print ("io_ioctl: ObjectTypeFile [TEST]\n"); return (int) regularfile_ioctl ( (int) fd, (unsigned long) request, (unsigned long) arg ); break; // tty object // See: drivers/tty/tty.c case ObjectTypeTTY: //case ObjectTypeTerminal: debug_print ("io_ioctl: ObjectTypeTTY\n"); return (int) tty_ioctl ( (int) fd, (unsigned long) request, (unsigned long) arg ); break; // socket object // see: net/socket.c ? case ObjectTypeSocket: debug_print ("io_ioctl: ObjectTypeSocket\n"); return (int) socket_ioctl ( (int) fd, (unsigned long) request, (unsigned long) arg ); break; // Console object // See: user/console.c case ObjectTypeVirtualConsole: debug_print ("io_ioctl: ObjectTypeVirtualConsole\n"); return (int) console_ioctl ( (int) fd, (unsigned long) request, (unsigned long) arg ); break; // keyboard // mouse // serial // disk // ... default: debug_print ("io_ioctl: [FAIL] default object\n"); return -1; //ENOTTY maybe break; }; //fail debug_print ("io_ioctl: Fail\n"); return -1; }
1003442.c
/* * Copyright (c) 2011 Intel Corporation. All Rights Reserved. * Copyright (c) Imagination Technologies Limited, UK * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: * Elaine Wang <elaine.wang@intel.com> * Zeng Li <zeng.li@intel.com> * */ #include <errno.h> #include <stdlib.h> #include <unistd.h> #include <stdint.h> #include <string.h> #include "psb_def.h" #include "psb_drv_debug.h" #include "psb_surface.h" #include "psb_cmdbuf.h" #include "pnw_hostcode.h" #include "pnw_H264ES.h" #include "pnw_hostheader.h" #include "va/va_enc_h264.h" #define TOPAZ_H264_MAX_BITRATE 50000000 #define INIT_CONTEXT_H264ES context_ENC_p ctx = (context_ENC_p) obj_context->format_data #define SURFACE(id) ((object_surface_p) object_heap_lookup( &ctx->obj_context->driver_data->surface_heap, id )) #define BUFFER(id) ((object_buffer_p) object_heap_lookup( &ctx->obj_context->driver_data->buffer_heap, id )) static void pnw_H264ES_QueryConfigAttributes( VAProfile __maybe_unused profile, VAEntrypoint __maybe_unused entrypoint, VAConfigAttrib *attrib_list, int num_attribs) { int i; drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264ES_QueryConfigAttributes\n"); /* RateControl attributes */ for (i = 0; i < num_attribs; i++) { switch (attrib_list[i].type) { case VAConfigAttribRTFormat: break; case VAConfigAttribRateControl: attrib_list[i].value = VA_RC_NONE | VA_RC_CBR | VA_RC_VBR | VA_RC_VCM; break; case VAConfigAttribEncMaxRefFrames: attrib_list[i].value = 1; break; default: attrib_list[i].value = VA_ATTRIB_NOT_SUPPORTED; break; } } } static VAStatus pnw_H264ES_ValidateConfig( object_config_p obj_config) { int i; /* Check all attributes */ for (i = 0; i < obj_config->attrib_count; i++) { switch (obj_config->attrib_list[i].type) { case VAConfigAttribRTFormat: /* Ignore */ break; case VAConfigAttribRateControl: break; case VAConfigAttribEncAutoReference: break; case VAConfigAttribEncMaxRefFrames: break; default: return VA_STATUS_ERROR_ATTR_NOT_SUPPORTED; } } return VA_STATUS_SUCCESS; } static VAStatus pnw_H264ES_CreateContext( object_context_p obj_context, object_config_p obj_config) { VAStatus vaStatus = VA_STATUS_SUCCESS; context_ENC_p ctx; int i; 
unsigned int eRCmode; drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264ES_CreateContext\n"); vaStatus = pnw_CreateContext(obj_context, obj_config, 0); if (VA_STATUS_SUCCESS != vaStatus) return VA_STATUS_ERROR_ALLOCATION_FAILED; ctx = (context_ENC_p) obj_context->format_data; for (i = 0; i < obj_config->attrib_count; i++) { if (obj_config->attrib_list[i].type == VAConfigAttribRateControl) break; } if (i >= obj_config->attrib_count) eRCmode = VA_RC_NONE; else eRCmode = obj_config->attrib_list[i].value; if (eRCmode == VA_RC_VBR) { ctx->eCodec = IMG_CODEC_H264_VBR; ctx->sRCParams.RCEnable = IMG_TRUE; ctx->sRCParams.bDisableBitStuffing = IMG_FALSE; } else if (eRCmode == VA_RC_CBR) { ctx->eCodec = IMG_CODEC_H264_CBR; ctx->sRCParams.RCEnable = IMG_TRUE; ctx->sRCParams.bDisableBitStuffing = IMG_TRUE; } else if (eRCmode == VA_RC_NONE) { ctx->eCodec = IMG_CODEC_H264_NO_RC; ctx->sRCParams.RCEnable = IMG_FALSE; ctx->sRCParams.bDisableBitStuffing = IMG_FALSE; } else if (eRCmode == VA_RC_VCM) { ctx->eCodec = IMG_CODEC_H264_VCM; ctx->sRCParams.RCEnable = IMG_TRUE; ctx->sRCParams.bDisableBitStuffing = IMG_FALSE; } else return VA_STATUS_ERROR_UNSUPPORTED_RT_FORMAT; drv_debug_msg(VIDEO_DEBUG_GENERAL, "eCodec is %d\n", ctx->eCodec); ctx->eFormat = IMG_CODEC_PL12; /* use default */ ctx->Slices = 1; ctx->idr_pic_id = 1; ctx->buffer_size = 0; ctx->initial_buffer_fullness = 0; //initialize the frame_rate and qp ctx->sRCParams.FrameRate = 30; if (getenv("PSB_VIDEO_SIG_CORE") == NULL) { ctx->Slices = 2; ctx->NumCores = 2; } ctx->ParallelCores = min(ctx->NumCores, ctx->Slices); ctx->IPEControl = pnw__get_ipe_control(ctx->eCodec); switch (obj_config->profile) { case VAProfileH264Baseline: ctx->profile_idc = 5; break; case VAProfileH264Main: ctx->profile_idc = 6; break; default: ctx->profile_idc = 6; break; } return vaStatus; } static void pnw_H264ES_DestroyContext( object_context_p obj_context) { drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264ES_DestroyPicture\n"); pnw_DestroyContext(obj_context); } 
/* Begin a new encode picture: delegates to the common pnw_BeginPicture. */
static VAStatus pnw_H264ES_BeginPicture(
    object_context_p obj_context)
{
    INIT_CONTEXT_H264ES;
    VAStatus vaStatus = VA_STATUS_SUCCESS;

    drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264ES_BeginPicture\n");

    vaStatus = pnw_BeginPicture(ctx);

    return vaStatus;
}

/* Consume a VAEncSequenceParameterBufferH264: validates/clips the target
 * bitrate, programs the rate-control and VUI/HRD parameters, builds the SPS
 * header into the command buffer, and saves a copy for periodic IDR reuse.
 * Takes ownership of obj_buffer->buffer_data and frees it on every path. */
static VAStatus pnw__H264ES_process_sequence_param(context_ENC_p ctx, object_buffer_p obj_buffer)
{
    VAEncSequenceParameterBufferH264 *pSequenceParams;
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;
    H264_VUI_PARAMS *pVUI_Params = &(ctx->VUI_Params);
    H264_CROP_PARAMS sCrop;
    int i;
    unsigned int frame_size;
    unsigned int max_bps;

    ASSERT(obj_buffer->type == VAEncSequenceParameterBufferType);
    ASSERT(obj_buffer->num_elements == 1);
    ASSERT(obj_buffer->size == sizeof(VAEncSequenceParameterBufferH264));

    if ((obj_buffer->num_elements != 1) ||
        (obj_buffer->size != sizeof(VAEncSequenceParameterBufferH264))) {
        return VA_STATUS_ERROR_UNKNOWN;
    }

    if (ctx->sRCParams.FrameRate == 0)
        ctx->sRCParams.FrameRate = 30;  /* defensive default */

    ctx->obj_context->frame_count = 0;

    /* Transfer ownership of the sequence parameter buffer data. */
    pSequenceParams = (VAEncSequenceParameterBufferH264 *) obj_buffer->buffer_data;
    obj_buffer->buffer_data = NULL;
    obj_buffer->size = 0;

    if (!pSequenceParams->bits_per_second) {
        /* Fallback bitrate: W*H*30fps*12 — presumably 12 bits/pixel raw
         * NV12 budget; TODO(review) confirm the intent of the constant. */
        pSequenceParams->bits_per_second = ctx->Height * ctx->Width * 30 * 12;
        drv_debug_msg(VIDEO_DEBUG_GENERAL, "bits_per_second is 0, set to %d\n",
                      pSequenceParams->bits_per_second);
    }

    ctx->sRCParams.bBitrateChanged =
        (pSequenceParams->bits_per_second == ctx->sRCParams.BitsPerSecond ?
         IMG_FALSE : IMG_TRUE);

    /* Clamp to the hardware maximum first. */
    if (pSequenceParams->bits_per_second > TOPAZ_H264_MAX_BITRATE) {
        ctx->sRCParams.BitsPerSecond = TOPAZ_H264_MAX_BITRATE;
        drv_debug_msg(VIDEO_DEBUG_GENERAL, " bits_per_second(%d) exceeds \
the maximum bitrate, set it with %d\n",
                      pSequenceParams->bits_per_second,
                      TOPAZ_H264_MAX_BITRATE);
    }

    /* According to Table A-1 Level limits, if resolution is bigger than 625SD,
       min compression ratio is 4, otherwise min compression ratio is 2 */
    max_bps = (ctx->Width * ctx->Height * 3 / 2 ) * 8 * ctx->sRCParams.FrameRate;
    if (ctx->Width > 720)
        max_bps /= 4;
    else
        max_bps /= 2;

    drv_debug_msg(VIDEO_DEBUG_GENERAL, " width %d height %d, frame rate %d\n",
                  ctx->Width, ctx->Height, ctx->sRCParams.FrameRate);

    if (pSequenceParams->bits_per_second > max_bps) {
        drv_debug_msg(VIDEO_DEBUG_ERROR,
                      "Invalid bitrate %d, violate ITU-T Rec. H.264 (03/2005) A.3.1"
                      "\n clip to %d bps\n",
                      pSequenceParams->bits_per_second, max_bps);
        ctx->sRCParams.BitsPerSecond = max_bps;
    } else {
        /* See 110% target bitrate for VCM.
           Otherwise, the resulted bitrate is much lower than target bitrate */
        if (ctx->eCodec == IMG_CODEC_H264_VCM)
            pSequenceParams->bits_per_second =
                pSequenceParams->bits_per_second / 100 * 110;
        drv_debug_msg(VIDEO_DEBUG_GENERAL, "Bitrate is set to %d\n",
                      pSequenceParams->bits_per_second);
        ctx->sRCParams.BitsPerSecond = pSequenceParams->bits_per_second;
    }

    /*if (ctx->sRCParams.IntraFreq != pSequenceParams->intra_period)
        ctx->sRCParams.bBitrateChanged = IMG_TRUE;*/

    ctx->sRCParams.IDRFreq = pSequenceParams->intra_idr_period;
    ctx->sRCParams.Slices = ctx->Slices;
    ctx->sRCParams.QCPOffset = 0;

    /* Reject a mid-GOP intra-period change when frame skipping is enabled:
     * a skipped I frame would otherwise be possible. */
    if (ctx->sRCParams.IntraFreq != pSequenceParams->intra_period
        && ctx->raw_frame_count != 0
        && ctx->sRCParams.IntraFreq != 0
        && ((ctx->obj_context->frame_count + 1) % ctx->sRCParams.IntraFreq) != 0
        && (!ctx->sRCParams.bDisableFrameSkipping)) {
        drv_debug_msg(VIDEO_DEBUG_ERROR,
                      "Changing intra period value in the middle of a GOP is\n"
                      "not allowed if frame skip isn't disabled.\n"
                      "it can cause I frame been skipped\n");
        free(pSequenceParams);
        return VA_STATUS_ERROR_INVALID_PARAMETER;
    } else
        ctx->sRCParams.IntraFreq = pSequenceParams->intra_period;

    frame_size = ctx->sRCParams.BitsPerSecond / ctx->sRCParams.FrameRate;

    /* HRD buffer model: use application-supplied values if HRD insertion was
     * requested and values were set; otherwise derive 1-second defaults. */
    if (ctx->bInserHRDParams &&
        ctx->buffer_size != 0 && ctx->initial_buffer_fullness != 0) {
        ctx->sRCParams.BufferSize = ctx->buffer_size;
        ctx->sRCParams.InitialLevel = ctx->buffer_size - ctx->initial_buffer_fullness;
        ctx->sRCParams.InitialDelay = ctx->initial_buffer_fullness;
    } else {
        ctx->buffer_size = ctx->sRCParams.BitsPerSecond;
        ctx->initial_buffer_fullness = ctx->sRCParams.BitsPerSecond;
        ctx->sRCParams.BufferSize = ctx->buffer_size;
        ctx->sRCParams.InitialLevel = (3 * ctx->sRCParams.BufferSize) >> 4;
        /* Aligned with target frame size */
        ctx->sRCParams.InitialLevel += (frame_size / 2);
        ctx->sRCParams.InitialLevel /= frame_size;
        ctx->sRCParams.InitialLevel *= frame_size;
        ctx->sRCParams.InitialDelay = ctx->buffer_size - ctx->sRCParams.InitialLevel;
    }

    /* Program per-core bias tables once, before the first raw frame. */
    if (ctx->raw_frame_count == 0)
    {
        for (i = (ctx->ParallelCores - 1); i >= 0; i--)
            pnw_set_bias(ctx, i);
    }

    /* VUI bitrate/CPB fields are coded in units of 64 bits (scale exponents
     * are set elsewhere — TODO(review) confirm against the header builder). */
    pVUI_Params->bit_rate_value_minus1 = ctx->sRCParams.BitsPerSecond / 64 - 1;
    pVUI_Params->cbp_size_value_minus1 = ctx->sRCParams.BufferSize / 64 - 1;

    /* Signal CBR in the VUI only for true CBR with stuffing and skipping on. */
    if (IMG_CODEC_H264_CBR != ctx->eCodec ||
        ctx->sRCParams.bDisableBitStuffing ||
        ctx->sRCParams.bDisableFrameSkipping)
        pVUI_Params->CBR = 0;
    else
        pVUI_Params->CBR = 1;

    pVUI_Params->initial_cpb_removal_delay_length_minus1 = BPH_SEI_NAL_INITIAL_CPB_REMOVAL_DELAY_SIZE - 1;
    pVUI_Params->cpb_removal_delay_length_minus1 = PTH_SEI_NAL_CPB_REMOVAL_DELAY_SIZE - 1;
    pVUI_Params->dpb_output_delay_length_minus1 = PTH_SEI_NAL_DPB_OUTPUT_DELAY_SIZE - 1;
    pVUI_Params->time_offset_length = 24;

    ctx->bInsertVUI = pSequenceParams->vui_parameters_present_flag ? IMG_TRUE : IMG_FALSE;

    if (ctx->bInsertVUI) {
        /* Use caller's timing only if self-consistent; otherwise derive
         * Time_Scale from the frame rate (x2 per H.264 field-based timing). */
        if (pSequenceParams->num_units_in_tick != 0 &&
            pSequenceParams->time_scale != 0 &&
            (pSequenceParams->time_scale > pSequenceParams->num_units_in_tick)) {
            pVUI_Params->Time_Scale = pSequenceParams->time_scale;
            pVUI_Params->num_units_in_tick = pSequenceParams->num_units_in_tick;
        } else {
            pVUI_Params->num_units_in_tick = 1;
            pVUI_Params->Time_Scale = ctx->sRCParams.FrameRate * 2;
        }
    }

    /* Only Extended_SAR (0xff) aspect ratio signalling is supported. */
    if (ctx->bInsertVUI &&
        pSequenceParams->vui_fields.bits.aspect_ratio_info_present_flag &&
        (pSequenceParams->aspect_ratio_idc == 0xff /* Extended_SAR */)) {
        pVUI_Params->aspect_ratio_info_present_flag = IMG_TRUE;
        pVUI_Params->aspect_ratio_idc = 0xff;
        pVUI_Params->sar_width = pSequenceParams->sar_width;
        pVUI_Params->sar_height = pSequenceParams->sar_height;
    }

    sCrop.bClip = pSequenceParams->frame_cropping_flag;
    sCrop.LeftCropOffset = 0;
    sCrop.RightCropOffset = 0;
    sCrop.TopCropOffset = 0;
    sCrop.BottomCropOffset = 0;

    /* No explicit cropping: derive crop from the 16-pixel macroblock padding
     * of non-MB-aligned dimensions (offsets in 2-pixel units). */
    if (!sCrop.bClip) {
        if (ctx->RawHeight & 0xf) {
            sCrop.bClip = IMG_TRUE;
            sCrop.BottomCropOffset = (((ctx->RawHeight + 0xf) & (~0xf)) - ctx->RawHeight) / 2;
        }
        if (ctx->RawWidth & 0xf) {
            sCrop.bClip = IMG_TRUE;
            sCrop.RightCropOffset = (((ctx->RawWidth + 0xf) & (~0xf)) - ctx->RawWidth) / 2;
        }
    } else {
        sCrop.LeftCropOffset = pSequenceParams->frame_crop_left_offset;
        sCrop.RightCropOffset = pSequenceParams->frame_crop_right_offset;
        sCrop.TopCropOffset = pSequenceParams->frame_crop_top_offset;
        sCrop.BottomCropOffset = pSequenceParams->frame_crop_bottom_offset;
    }

    /* sequence header is always inserted */
    memset(cmdbuf->header_mem_p + ctx->seq_header_ofs, 0, HEADER_SIZE);

    /*
    if (ctx->bInserHRDParams) {
        memset(cmdbuf->header_mem_p + ctx->aud_header_ofs, 0, HEADER_SIZE);
        pnw__H264_prepare_AUD_header(cmdbuf->header_mem_p + ctx->aud_header_ofs);
        pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                          ctx->ParallelCores - 1,
                                          MTX_CMDID_DO_HEADER,
                                          &cmdbuf->header_mem,
                                          ctx->aud_header_ofs);
    }
    */

    /* NOTE(review): both branches below are token-identical — the RC/no-RC
     * distinction appears to have no effect here; left as-is. */
    if (ctx->eCodec == IMG_CODEC_H264_NO_RC)
        pnw__H264_prepare_sequence_header(cmdbuf->header_mem_p + ctx->seq_header_ofs,
                                          pSequenceParams->picture_width_in_mbs,
                                          pSequenceParams->picture_height_in_mbs,
                                          pSequenceParams->vui_parameters_present_flag,
                                          pSequenceParams->vui_parameters_present_flag ? (pVUI_Params) : NULL,
                                          &sCrop,
                                          pSequenceParams->level_idc, ctx->profile_idc);
    else
        pnw__H264_prepare_sequence_header(cmdbuf->header_mem_p + ctx->seq_header_ofs,
                                          pSequenceParams->picture_width_in_mbs,
                                          pSequenceParams->picture_height_in_mbs,
                                          pSequenceParams->vui_parameters_present_flag,
                                          pSequenceParams->vui_parameters_present_flag ? (pVUI_Params) : NULL,
                                          &sCrop,
                                          pSequenceParams->level_idc, ctx->profile_idc);

    /*Periodic IDR need SPS.
      We save the sequence header here*/
    if (ctx->sRCParams.IDRFreq != 0) {
        if (NULL == ctx->save_seq_header_p) {
            ctx->save_seq_header_p = malloc(HEADER_SIZE);
            if (NULL == ctx->save_seq_header_p) {
                drv_debug_msg(VIDEO_DEBUG_ERROR, "Ran out of memory!\n");
                free(pSequenceParams);
                return VA_STATUS_ERROR_ALLOCATION_FAILED;
            }
            memcpy((unsigned char *)ctx->save_seq_header_p,
                   (unsigned char *)(cmdbuf->header_mem_p + ctx->seq_header_ofs),
                   HEADER_SIZE);
        }
    }
    ctx->none_vcl_nal++;

    cmdbuf->cmd_idx_saved[PNW_CMDBUF_SEQ_HEADER_IDX] = cmdbuf->cmd_idx;

    /* Send to the last core as this will complete first */
    pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                      ctx->ParallelCores - 1,
                                      MTX_CMDID_DO_HEADER,
                                      &cmdbuf->header_mem,
                                      ctx->seq_header_ofs);

    free(pSequenceParams);
    return VA_STATUS_SUCCESS;
}

/* Build and queue an HRD buffering-period SEI message. Delays are derived
 * from the RC buffer model and expressed in 90 kHz clock ticks. */
static VAStatus pnw__H264ES_insert_SEI_buffer_period(context_ENC_p ctx)
{
    unsigned int ui32nal_initial_cpb_removal_delay;
    unsigned int ui32nal_initial_cpb_removal_delay_offset;
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;

    /* 90000 * seconds-of-delay; computed in double then truncated. */
    ui32nal_initial_cpb_removal_delay =
        90000 * (1.0 * ctx->sRCParams.InitialDelay / ctx->sRCParams.BitsPerSecond);
    ui32nal_initial_cpb_removal_delay_offset =
        90000 * (1.0 * ctx->buffer_size / ctx->sRCParams.BitsPerSecond)
        - ui32nal_initial_cpb_removal_delay;

    drv_debug_msg(VIDEO_DEBUG_GENERAL,
                  "Insert SEI buffer period message with "
                  "ui32nal_initial_cpb_removal_delay(%d) and "
                  "ui32nal_initial_cpb_removal_delay_offset(%d)\n",
                  ui32nal_initial_cpb_removal_delay,
                  ui32nal_initial_cpb_removal_delay_offset);

    memset(cmdbuf->header_mem_p + ctx->sei_buf_prd_ofs, 0, HEADER_SIZE);
    pnw__H264_prepare_SEI_buffering_period_header(
        (MTX_HEADER_PARAMS *)(cmdbuf->header_mem_p + ctx->sei_buf_prd_ofs),
        1, //ui8NalHrdBpPresentFlag,
        0, //ui8nal_cpb_cnt_minus1,
        1 + ctx->VUI_Params.initial_cpb_removal_delay_length_minus1, //ui8nal_initial_cpb_removal_delay_length,
        ui32nal_initial_cpb_removal_delay, //ui32nal_initial_cpb_removal_delay,
        ui32nal_initial_cpb_removal_delay_offset,
        //ui32nal_initial_cpb_removal_delay_offset,
        0, //ui8VclHrdBpPresentFlag,
        NOT_USED_BY_TOPAZ, //ui8vcl_cpb_cnt_minus1,
        0, //ui32vcl_initial_cpb_removal_delay,
        0 //ui32vcl_initial_cpb_removal_delay_offset
    );

    cmdbuf->cmd_idx_saved[PNW_CMDBUF_SEI_BUF_PERIOD_IDX] = cmdbuf->cmd_idx;
    pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                      ctx->ParallelCores - 1,
                                      MTX_CMDID_DO_HEADER,
                                      &cmdbuf->header_mem,
                                      ctx->sei_buf_prd_ofs);
    ctx->none_vcl_nal++;
    return VA_STATUS_SUCCESS;
}

/* Build and queue an HRD picture-timing SEI message. Most fields are zeroed;
 * only the CPB removal delay varies with the frame position in the GOP. */
static VAStatus pnw__H264ES_insert_SEI_pic_timing(context_ENC_p ctx)
{
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;
    uint32_t ui32cpb_removal_delay;

    drv_debug_msg(VIDEO_DEBUG_GENERAL, "Insert SEI picture timing message. \n");

    memset(cmdbuf->header_mem_p + ctx->sei_pic_tm_ofs, 0, HEADER_SIZE);

    /* ui32cpb_removal_delay is zero for 1st frame and will be reset
     * after a IDR frame */
    if (ctx->obj_context->frame_count == 0) {
        if (ctx->raw_frame_count == 0)
            ui32cpb_removal_delay = 0;
        else
            /* frame_count was reset by an IDR; reconstruct the delay from
             * the GOP structure (x2 ticks per frame, as below). */
            ui32cpb_removal_delay =
                ctx->sRCParams.IDRFreq * ctx->sRCParams.IntraFreq * 2;
    } else
        ui32cpb_removal_delay = 2 * ctx->obj_context->frame_count;

    pnw__H264_prepare_SEI_picture_timing_header(
        (MTX_HEADER_PARAMS *)(cmdbuf->header_mem_p + ctx->sei_pic_tm_ofs),
        1,
        ctx->VUI_Params.cpb_removal_delay_length_minus1,
        ctx->VUI_Params.dpb_output_delay_length_minus1,
        ui32cpb_removal_delay, //ui32cpb_removal_delay,
        2, //ui32dpb_output_delay,
        0, //ui8pic_struct_present_flag,
        0, //ui8pic_struct,
        0, //ui8NumClockTS,
        0, //*aui8clock_timestamp_flag,
        0, //ui8full_timestamp_flag,
        0, //ui8seconds_flag,
        0, //ui8minutes_flag,
        0, //ui8hours_flag,
        0, //ui8seconds_value,
        0, //ui8minutes_value,
        0, //ui8hours_value,
        0, //ui8ct_type,
        0, //ui8nuit_field_based_flag,
        0, //ui8counting_type,
        0, //ui8discontinuity_flag,
        0, //ui8cnt_dropped_flag,
        0, //ui8n_frames,
        0, //ui8time_offset_length,
        0 //i32time_offset)
    );

    cmdbuf->cmd_idx_saved[PNW_CMDBUF_SEI_PIC_TIMING_IDX] = cmdbuf->cmd_idx;
    pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                      ctx->ParallelCores - 1,
                                      MTX_CMDID_DO_HEADER,
                                      &cmdbuf->header_mem,
                                      ctx->sei_pic_tm_ofs);
    ctx->none_vcl_nal++;
    return VA_STATUS_SUCCESS;
}

#if PSB_MFLD_DUMMY_CODE
/* Record the payload size (bytes) of a packed frame-packing-arrangement SEI;
 * the data itself arrives later via pnw__H264ES_insert_SEI_FPA_data. */
static VAStatus pnw__H264ES_insert_SEI_FPA_param(context_ENC_p ctx, object_buffer_p obj_buffer)
{
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;
    VAEncPackedHeaderParameterBuffer *sei_param_buf = (VAEncPackedHeaderParameterBuffer *)obj_buffer->buffer_data;

    drv_debug_msg(VIDEO_DEBUG_GENERAL, "Insert SEI frame packing arrangement message. \n");
    ctx->sei_pic_data_size = sei_param_buf->bit_length / 8;

    return VA_STATUS_SUCCESS;
}

/* Copy the packed FPA SEI payload into the header memory and queue it. */
static VAStatus pnw__H264ES_insert_SEI_FPA_data(context_ENC_p ctx, object_buffer_p obj_buffer)
{
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;
    char *sei_data_buf;

    drv_debug_msg(VIDEO_DEBUG_GENERAL, "Insert SEI frame packing arrangement message. \n");

    memset(cmdbuf->header_mem_p + ctx->sei_pic_fpa_ofs, 0, HEADER_SIZE);

    sei_data_buf = (char *)obj_buffer->buffer_data;

    pnw__H264_prepare_SEI_FPA_header((MTX_HEADER_PARAMS *)(cmdbuf->header_mem_p + ctx->sei_pic_fpa_ofs),
                                     sei_data_buf,
                                     ctx->sei_pic_data_size);

    pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                      ctx->ParallelCores - 1,
                                      MTX_CMDID_DO_HEADER,
                                      &cmdbuf->header_mem,
                                      ctx->sei_pic_fpa_ofs);
    return VA_STATUS_SUCCESS;
}
#endif

/* Consume a VAEncPictureParameterBufferH264: resolves surfaces and coded
 * buffer, decides forced IDR / SPS re-insertion for periodic IDR, emits
 * picture-level headers on the first frame, splits the coded buffer across
 * cores, and renders per-core START_PICTURE parameters.
 * Takes ownership of obj_buffer->buffer_data and frees it on every path. */
static VAStatus pnw__H264ES_process_picture_param(context_ENC_p ctx, object_buffer_p obj_buffer)
{
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    int i;
    VAEncPictureParameterBufferH264 *pBuffer;
    int need_sps = 0;
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;

    ASSERT(obj_buffer->type == VAEncPictureParameterBufferType);

    if ((obj_buffer->num_elements != 1) ||
        (obj_buffer->size != sizeof(VAEncPictureParameterBufferH264))) {
        return VA_STATUS_ERROR_UNKNOWN;
    }

    /* Transfer ownership of VAEncPictureParameterBufferH264 data */
    pBuffer = (VAEncPictureParameterBufferH264 *) obj_buffer->buffer_data;
    obj_buffer->buffer_data = NULL;
    obj_buffer->size = 0;

    ctx->ref_surface
        = SURFACE(pBuffer->ReferenceFrames[0].picture_id);
    ctx->dest_surface = SURFACE(pBuffer->CurrPic.picture_id);
    ctx->coded_buf = BUFFER(pBuffer->coded_buf);

    //ASSERT(ctx->Width == pBuffer->picture_width);
    //ASSERT(ctx->Height == pBuffer->picture_height);

    if (NULL == ctx->coded_buf) {
        drv_debug_msg(VIDEO_DEBUG_ERROR, "%s L%d Invalid coded buffer handle\n",
                      __FUNCTION__, __LINE__);
        free(pBuffer);
        return VA_STATUS_ERROR_INVALID_BUFFER;
    }

    if ((ctx->sRCParams.IntraFreq != 0) && (ctx->sRCParams.IDRFreq != 0)) { /* period IDR is desired */
        unsigned int is_intra = 0;
        unsigned int intra_cnt = 0;

        ctx->force_idr_h264 = 0;

        if ((ctx->obj_context->frame_count % ctx->sRCParams.IntraFreq) == 0) {
            is_intra = 1; /* suppose current frame is I frame */
            intra_cnt = ctx->obj_context->frame_count / ctx->sRCParams.IntraFreq;
        }

        /* current frame is I frame (suppose), and an IDR frame is desired*/
        if ((is_intra) && ((intra_cnt % ctx->sRCParams.IDRFreq) == 0)) {
            ctx->force_idr_h264 = 1;
            /*When two consecutive access units in decoding order are both IDR access
             * units, the value of idr_pic_id in the slices of the first such IDR
             * access unit shall differ from the idr_pic_id in the second such IDR
             * access unit.
              We set it with 1 or 0 alternately.*/
            ctx->idr_pic_id = 1 - ctx->idr_pic_id;

            /* it is periodic IDR in the middle of one sequence encoding, need SPS */
            if (ctx->obj_context->frame_count > 0)
                need_sps = 1;

            ctx->obj_context->frame_count = 0;
        }
    }

    /* If VUI header isn't enabled, we'll igore the request for HRD header insertion */
    if (ctx->bInserHRDParams)
        ctx->bInserHRDParams = ctx->bInsertVUI;

    /* For H264, PicHeader only needed in the first picture*/
    if (!(ctx->obj_context->frame_count)) {
        cmdbuf = ctx->obj_context->pnw_cmdbuf;

        if (need_sps) {
            drv_debug_msg(VIDEO_DEBUG_GENERAL, "TOPAZ: insert a SPS before IDR frame\n");
            /* reuse the previous SPS */
            memcpy((unsigned char *)(cmdbuf->header_mem_p + ctx->seq_header_ofs),
                   (unsigned char *)ctx->save_seq_header_p,
                   HEADER_SIZE);

            cmdbuf->cmd_idx_saved[PNW_CMDBUF_SEQ_HEADER_IDX] = cmdbuf->cmd_idx;
            /* Send to the last core as this will complete first */
            pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                              ctx->ParallelCores - 1,
                                              MTX_CMDID_DO_HEADER,
                                              &cmdbuf->header_mem,
                                              ctx->seq_header_ofs);
            ctx->none_vcl_nal++;
        }

        if (ctx->bInserHRDParams) {
            pnw__H264ES_insert_SEI_buffer_period(ctx);
            pnw__H264ES_insert_SEI_pic_timing(ctx);
        }

        pnw__H264_prepare_picture_header(cmdbuf->header_mem_p + ctx->pic_header_ofs,
                                         IMG_FALSE,
                                         ctx->sRCParams.QCPOffset);

        cmdbuf->cmd_idx_saved[PNW_CMDBUF_PIC_HEADER_IDX] = cmdbuf->cmd_idx;
        /* Send to the last core as this will complete first */
        pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                          ctx->ParallelCores - 1,
                                          MTX_CMDID_DO_HEADER,
                                          &cmdbuf->header_mem,
                                          ctx->pic_header_ofs);
        ctx->none_vcl_nal++;
    } else if (ctx->bInserHRDParams)
        /* Non-first frames still carry a picture-timing SEI when HRD is on. */
        pnw__H264ES_insert_SEI_pic_timing(ctx);

    if (ctx->ParallelCores == 1) {
        ctx->coded_buf_per_slice = 0;
        drv_debug_msg(VIDEO_DEBUG_GENERAL,
                      "TOPAZ: won't splite coded buffer(%d) since only one slice being encoded\n",
                      ctx->coded_buf->size);
    } else {
        /*Make sure DMA start is 128bits alignment*/
        ctx->coded_buf_per_slice = (ctx->coded_buf->size / ctx->ParallelCores) & (~0xf) ;
        drv_debug_msg(VIDEO_DEBUG_GENERAL,
                      "TOPAZ: the size of coded_buf per slice %d( Total %d) \n",
                      ctx->coded_buf_per_slice,
                      ctx->coded_buf->size);
    }

    /* Prepare START_PICTURE params */
    /* FIXME is really need multiple picParams? Need multiple calculate for each? */
    for (i = (ctx->ParallelCores - 1); i >= 0; i--)
        vaStatus = pnw_RenderPictureParameter(ctx, i);

    free(pBuffer);
    return vaStatus;
}

/* Encode one slice: validates slice geometry, writes the slice header into
 * header memory, queues the DO_HEADER command on the round-robin target core
 * (ctx->SliceToCore), and — unless the frame is being skipped by RC — sets up
 * in-row parameters and queues the DO_SLICE command.
 * Increments ctx->obj_context->slice_count on success. */
static VAStatus pnw__H264ES_encode_one_slice(context_ENC_p ctx,
                                             VAEncSliceParameterBuffer *pBuffer)
{
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;
    unsigned int MBSkipRun, FirstMBAddress;
    unsigned char deblock_idc;
    unsigned char is_intra = 0;
    int slice_param_idx;
    PIC_PARAMS *psPicParams = (PIC_PARAMS *)(cmdbuf->pic_params_p);
    VAStatus vaStatus = VA_STATUS_SUCCESS;

    /*Slice encoding Order:
     *1.Insert Do header command
     *2.setup InRowParams
     *3.setup Slice params
     *4.Insert Do slice command
     * */

    /* Reject slices that fall outside the picture (heights in MB rows). */
    if (pBuffer->slice_height > (ctx->Height / 16) ||
        pBuffer->start_row_number > (ctx->Height / 16) ||
        (pBuffer->slice_height + pBuffer->start_row_number) > (ctx->Height / 16)) {
        drv_debug_msg(VIDEO_DEBUG_ERROR,
                      "slice height %d or start row number %d is too large",
                      pBuffer->slice_height, pBuffer->start_row_number);
        return VA_STATUS_ERROR_INVALID_PARAMETER;
    }

    /* NOTE(review): (slice_height * Width) / 16 — presumably MB count of the
     * slice given slice_height is in MB rows; confirm units with callers. */
    MBSkipRun = (pBuffer->slice_height * ctx->Width) / 16;
    deblock_idc = pBuffer->slice_flags.bits.disable_deblocking_filter_idc;

    /*If the frame is skipped, it shouldn't be a I frame*/
    if (ctx->force_idr_h264 || (ctx->obj_context->frame_count == 0)) {
        is_intra = 1;
    } else
        is_intra = (ctx->sRCParams.RCEnable && ctx->sRCParams.FrameSkip) ?
                   0 : pBuffer->slice_flags.bits.is_intra;

    FirstMBAddress = (pBuffer->start_row_number * ctx->Width) / 16;

    memset(cmdbuf->header_mem_p + ctx->slice_header_ofs
           + ctx->obj_context->slice_count * HEADER_SIZE,
           0,
           HEADER_SIZE);

    /* Insert Do Header command, relocation is needed */
    pnw__H264_prepare_slice_header(cmdbuf->header_mem_p + ctx->slice_header_ofs
                                   + ctx->obj_context->slice_count * HEADER_SIZE,
                                   is_intra,
                                   pBuffer->slice_flags.bits.disable_deblocking_filter_idc,
                                   ctx->obj_context->frame_count,
                                   FirstMBAddress,
                                   MBSkipRun,
                                   0,
                                   ctx->force_idr_h264,
                                   IMG_FALSE,
                                   IMG_FALSE,
                                   ctx->idr_pic_id);

    /* ensure that this slice is consequtive to that last processed by the target core */
    /*
    ASSERT( -1 == ctx->LastSliceNum[ctx->SliceToCore]
            || ctx->obj_context->slice_count == 1 + ctx->LastSliceNum[ctx->SliceToCore] );
    */
    /* note the slice number the target core is now processing */
    ctx->LastSliceNum[ctx->SliceToCore] = ctx->obj_context->slice_count;

    pnw_cmdbuf_insert_command_package(ctx->obj_context,
                                      ctx->SliceToCore,
                                      MTX_CMDID_DO_HEADER,
                                      &cmdbuf->header_mem,
                                      ctx->slice_header_ofs + ctx->obj_context->slice_count * HEADER_SIZE);

    /* Skip slice-body encode entirely when RC decided to skip the frame. */
    if (!(ctx->sRCParams.RCEnable && ctx->sRCParams.FrameSkip)) {
        /*Only reset on the first frame. It's more effective than DDK. Have confirmed with IMG*/
        if (ctx->obj_context->frame_count == 0)
            pnw_reset_encoder_params(ctx);

        /* Double-buffered below-params: flip on the first intra slice row. */
        if ((pBuffer->start_row_number == 0) && pBuffer->slice_flags.bits.is_intra) {
            ctx->BelowParamsBufIdx = (ctx->BelowParamsBufIdx + 1) & 0x1;
        }

        /* Cache index: intra slices use the first half, inter the second. */
        slice_param_idx = (pBuffer->slice_flags.bits.is_intra ?
                           0 : 1) * ctx->slice_param_num + ctx->obj_context->slice_count;
        if (VAEncSliceParameter_Equal(&ctx->slice_param_cache[slice_param_idx], pBuffer) == 0) {
            /* cache current param parameters */
            memcpy(&ctx->slice_param_cache[slice_param_idx],
                   pBuffer, sizeof(VAEncSliceParameterBuffer));

            /* Setup InParams value*/
            pnw_setup_slice_params(ctx,
                                   pBuffer->start_row_number * 16,
                                   pBuffer->slice_height * 16,
                                   pBuffer->slice_flags.bits.is_intra,
                                   ctx->obj_context->frame_count > 0,
                                   psPicParams->sInParams.SeInitQP);
        }

        /* Insert do slice command and setup related buffer value */
        pnw__send_encode_slice_params(ctx,
                                      pBuffer->slice_flags.bits.is_intra,
                                      pBuffer->start_row_number * 16,
                                      deblock_idc,
                                      ctx->obj_context->frame_count,
                                      pBuffer->slice_height * 16,
                                      ctx->obj_context->slice_count);

        drv_debug_msg(VIDEO_DEBUG_GENERAL, "Now frame_count/slice_count is %d/%d\n",
                      ctx->obj_context->frame_count, ctx->obj_context->slice_count);
    }
    ctx->obj_context->slice_count++;

    return vaStatus;
}

/* convert from VAEncSliceParameterBufferH264 to VAEncSliceParameterBuffer */
/* Converts num_elemenent entries in place; slice_type 2/7 (I/I-only per the
 * H.264 slice_type table) map to is_intra. Always returns 0 (success). */
static VAStatus pnw__convert_sliceparameter_buffer(VAEncSliceParameterBufferH264 *pBufferH264,
                                                   VAEncSliceParameterBuffer *pBuffer,
                                                   int picture_width_in_mbs,
                                                   unsigned int num_elemenent)
{
    unsigned int i;

    for (i = 0; i < num_elemenent; i++) {
        pBuffer->start_row_number = pBufferH264->macroblock_address / picture_width_in_mbs;
        pBuffer->slice_height = pBufferH264->num_macroblocks / picture_width_in_mbs;
        pBuffer->slice_flags.bits.is_intra =
            (((pBufferH264->slice_type == 2) || (pBufferH264->slice_type == 7)) ?
             1 : 0);
        pBuffer->slice_flags.bits.disable_deblocking_filter_idc =
            pBufferH264->disable_deblocking_filter_idc;

        /* next conversion */
        pBuffer++;
        pBufferH264++;
    }
    return 0;
}

/* Consume a slice-parameter buffer (either the legacy VAEncSliceParameterBuffer
 * layout or VAEncSliceParameterBufferH264, which is converted first), then
 * distributes the slices round-robin across the parallel encoder cores via
 * pnw__H264ES_encode_one_slice. Uses goto out1/out2 for cleanup; always frees
 * obj_buffer->buffer_data before returning. */
static VAStatus pnw__H264ES_process_slice_param(context_ENC_p ctx, object_buffer_p obj_buffer)
{
    /* Prepare InParams for macros of current slice, insert slice header, insert do slice command */
    VAEncSliceParameterBuffer *pBuf_per_core, *pBuffer;
    pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf;
    PIC_PARAMS *psPicParams = (PIC_PARAMS *)(cmdbuf->pic_params_p);
    unsigned int i, j, slice_per_core;
    VAStatus vaStatus = VA_STATUS_SUCCESS;

    ASSERT(obj_buffer->type == VAEncSliceParameterBufferType);

    /* Cannot have more slices than macroblock rows. */
    if (obj_buffer->num_elements > (ctx->Height / 16)) {
        vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
        goto out2;
    }

    cmdbuf = ctx->obj_context->pnw_cmdbuf;
    psPicParams = (PIC_PARAMS *)cmdbuf->pic_params_p;

    /* Transfer ownership of VAEncPictureParameterBuffer data */
    /* The buffer element size discriminates the two accepted layouts. */
    if (obj_buffer->size == sizeof(VAEncSliceParameterBufferH264)) {
        drv_debug_msg(VIDEO_DEBUG_GENERAL, "Receive VAEncSliceParameterBufferH264 buffer");
        pBuffer = calloc(obj_buffer->num_elements, sizeof(VAEncSliceParameterBuffer));
        if (pBuffer == NULL) {
            drv_debug_msg(VIDEO_DEBUG_ERROR, "Run out of memory!\n");
            vaStatus = VA_STATUS_ERROR_ALLOCATION_FAILED;
            goto out2;
        }
        pnw__convert_sliceparameter_buffer((VAEncSliceParameterBufferH264 *)obj_buffer->buffer_data,
                                           pBuffer,
                                           ctx->Width / 16,
                                           obj_buffer->num_elements);
    } else if (obj_buffer->size == sizeof(VAEncSliceParameterBuffer)) {
        drv_debug_msg(VIDEO_DEBUG_GENERAL, "Receive VAEncSliceParameterBuffer buffer");
        pBuffer = (VAEncSliceParameterBuffer *) obj_buffer->buffer_data;
    } else {
        drv_debug_msg(VIDEO_DEBUG_ERROR, "Buffer size(%d) is wrong. It should be %d or %d\n",
                      obj_buffer->size,
                      sizeof(VAEncSliceParameterBuffer),
                      sizeof(VAEncSliceParameterBufferH264));
        vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
        goto out2;
    }
    /* NOTE(review): size is zeroed here, so the `size == sizeof(...H264)`
     * checks at out1 and in the cache-alloc failure path below can never be
     * true — the converted pBuffer may leak; verify and fix upstream. */
    obj_buffer->size = 0;

    /*In case the slice number changes*/
    if ((ctx->slice_param_cache != NULL) && (obj_buffer->num_elements != ctx->slice_param_num)) {
        drv_debug_msg(VIDEO_DEBUG_GENERAL, "Slice number changes. Previous value is %d. Now it's %d\n",
                      ctx->slice_param_num, obj_buffer->num_elements);
        free(ctx->slice_param_cache);
        ctx->slice_param_cache = NULL;
        ctx->slice_param_num = 0;
    }

    if (NULL == ctx->slice_param_cache) {
        ctx->slice_param_num = obj_buffer->num_elements;
        drv_debug_msg(VIDEO_DEBUG_GENERAL, "Allocate %d VAEncSliceParameterBuffer cache buffers\n",
                      2 * ctx->slice_param_num);
        /* 2x: separate cache entries for intra and inter slices. */
        ctx->slice_param_cache = calloc(2 * ctx->slice_param_num, sizeof(VAEncSliceParameterBuffer));
        if (NULL == ctx->slice_param_cache) {
            drv_debug_msg(VIDEO_DEBUG_ERROR, "Run out of memory!\n");
            /* free the converted VAEncSliceParameterBuffer */
            if (obj_buffer->size == sizeof(VAEncSliceParameterBufferH264))
                free(pBuffer);
            free(obj_buffer->buffer_data);
            return VA_STATUS_ERROR_ALLOCATION_FAILED;
        }
    }

    ctx->sRCParams.Slices = obj_buffer->num_elements;

    if (getenv("PSB_VIDEO_SIG_CORE") == NULL) {
        /* Dual-core context but only one slice: demote the extra core. */
        if ((ctx->ParallelCores == 2) && (obj_buffer->num_elements == 1)) {
            /*Need to replace unneccesary MTX_CMDID_STARTPICs with MTX_CMDID_PAD*/
            for (i = 0; i < (ctx->ParallelCores - 1); i++) {
                *(cmdbuf->cmd_idx_saved[PNW_CMDBUF_START_PIC_IDX] + i * 4) &= (~MTX_CMDWORD_ID_MASK);
                *(cmdbuf->cmd_idx_saved[PNW_CMDBUF_START_PIC_IDX] + i * 4) |= MTX_CMDID_PAD;
            }
            drv_debug_msg(VIDEO_DEBUG_GENERAL,
                          " Remove unneccesary %d MTX_CMDID_STARTPIC commands from cmdbuf\n",
                          ctx->ParallelCores - obj_buffer->num_elements);
            ctx->ParallelCores = obj_buffer->num_elements;

            /* All header generation commands should be send to core 0*/
            for (i = PNW_CMDBUF_SEQ_HEADER_IDX; i < PNW_CMDBUF_SAVING_MAX; i++) {
                if (cmdbuf->cmd_idx_saved[i] != 0)
                    *(cmdbuf->cmd_idx_saved[i]) &= ~(MTX_CMDWORD_CORE_MASK << MTX_CMDWORD_CORE_SHIFT);
            }
            ctx->SliceToCore = ctx->ParallelCores - 1;
        }
    }

    /* Interleave: slice k goes to core (count-down SliceToCore); buffers are
     * laid out so core j's i-th slice is at pBuf_per_core + j*slice_per_core. */
    slice_per_core = obj_buffer->num_elements / ctx->ParallelCores;
    pBuf_per_core = pBuffer;
    for (i = 0; i < slice_per_core; i++) {
        pBuffer = pBuf_per_core;
        for (j = 0; j < ctx->ParallelCores; j++) {
            vaStatus = pnw__H264ES_encode_one_slice(ctx, pBuffer);
            if (vaStatus != VA_STATUS_SUCCESS)
                goto out1;
            if (0 == ctx->SliceToCore) {
                ctx->SliceToCore = ctx->ParallelCores;
            }
            ctx->SliceToCore--;

            ASSERT(ctx->obj_context->slice_count < MAX_SLICES_PER_PICTURE);
            /*Move to the next buffer which will be sent to core j*/
            pBuffer += slice_per_core;
        }
        pBuf_per_core++; /* Move to the next buffer */
    }

    /*Cope with last slice when slice number is odd and parallelCores is even*/
    if (obj_buffer->num_elements > (slice_per_core * ctx->ParallelCores)) {
        ctx->SliceToCore = 0;
        pBuffer -= slice_per_core;
        pBuffer ++;
        vaStatus = pnw__H264ES_encode_one_slice(ctx, pBuffer);
    }

out1:
    /* free the converted VAEncSliceParameterBuffer */
    if (obj_buffer->size == sizeof(VAEncSliceParameterBufferH264))
        free(pBuffer);
out2:
    free(obj_buffer->buffer_data);
    obj_buffer->buffer_data = NULL;

    return vaStatus;
}

/* Consume a VAEncMiscParameterBuffer (frame rate, rate control, max slice
 * size, AIR, HRD). In non-VCM modes only HRD/RateControl/FrameRate types are
 * accepted. Continues past this view; frees buffer_data on its exit paths. */
static VAStatus pnw__H264ES_process_misc_param(context_ENC_p ctx, object_buffer_p obj_buffer)
{
    /* Prepare InParams for macros of current slice, insert slice header, insert do slice command */
    VAEncMiscParameterBuffer *pBuffer;
    VAEncMiscParameterRateControl *rate_control_param;
    VAEncMiscParameterAIR *air_param;
    VAEncMiscParameterMaxSliceSize *max_slice_size_param;
    VAEncMiscParameterFrameRate *frame_rate_param;
    VAEncMiscParameterHRD *hrd_param;
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    unsigned int max_bps;
    unsigned int frame_size;

    ASSERT(obj_buffer->type == VAEncMiscParameterBufferType);

    /* Transfer ownership of VAEncMiscParameterBuffer data */
    pBuffer = (VAEncMiscParameterBuffer *) obj_buffer->buffer_data;
    obj_buffer->size = 0;

    if (ctx->eCodec != IMG_CODEC_H264_VCM && (pBuffer->type
                                              != VAEncMiscParameterTypeHRD
                                              && pBuffer->type != VAEncMiscParameterTypeRateControl
                                              && pBuffer->type != VAEncMiscParameterTypeFrameRate)) {
        drv_debug_msg(VIDEO_DEBUG_GENERAL,
                      "Buffer type %d isn't supported in none VCM mode.\n",
                      pBuffer->type);
        free(obj_buffer->buffer_data);
        obj_buffer->buffer_data = NULL;
        return VA_STATUS_SUCCESS;
    }

    switch (pBuffer->type) {
    case VAEncMiscParameterTypeFrameRate:
        frame_rate_param = (VAEncMiscParameterFrameRate *)pBuffer->data;

        if (frame_rate_param->framerate < 1 || frame_rate_param->framerate > 65535) {
            vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
            break;
        }

        if (ctx->sRCParams.FrameRate == frame_rate_param->framerate)
            break;

        drv_debug_msg(VIDEO_DEBUG_GENERAL, "frame rate changed from %d to %d\n",
                      ctx->sRCParams.FrameRate,
                      frame_rate_param->framerate);
        ctx->sRCParams.FrameRate = frame_rate_param->framerate;
        ctx->sRCParams.bBitrateChanged = IMG_TRUE;
        /* NOTE(review): this clamp is redundant — the range check above
         * already guarantees 1..65535; kept as-is. */
        ctx->sRCParams.FrameRate = (frame_rate_param->framerate < 1) ? 1 :
                                   ((65535 < frame_rate_param->framerate) ? 65535 : frame_rate_param->framerate);
        break;

    case VAEncMiscParameterTypeRateControl:
        rate_control_param = (VAEncMiscParameterRateControl *)pBuffer->data;

        /* Currently, none VCM mode only supports frame skip and bit stuffing
         * disable flag and doesn't accept other parameters in
         * buffer of VAEncMiscParameterTypeRateControl type */
        if (rate_control_param->rc_flags.value != 0 || ctx->raw_frame_count == 0) {
            if (rate_control_param->rc_flags.bits.disable_frame_skip)
                ctx->sRCParams.bDisableFrameSkipping = IMG_TRUE;
            if (rate_control_param->rc_flags.bits.disable_bit_stuffing)
                ctx->sRCParams.bDisableBitStuffing = IMG_TRUE;
            drv_debug_msg(VIDEO_DEBUG_GENERAL,
                          "bDisableFrameSkipping is %d and bDisableBitStuffing is %d\n",
                          ctx->sRCParams.bDisableFrameSkipping, ctx->sRCParams.bDisableBitStuffing);
        }

        /* QP legal range for H.264 is 0..51. */
        if (rate_control_param->initial_qp > 51 ||
            rate_control_param->min_qp > 51) {
            drv_debug_msg(VIDEO_DEBUG_ERROR,
                          "Initial_qp(%d) and min_qpinitial_qp(%d) "
                          "are invalid.\nQP shouldn't be larger than 51 for H264\n",
                          rate_control_param->initial_qp,
                          rate_control_param->min_qp);
            vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
            break;
        }

        /* window_size is in milliseconds (used as BitsPerSecond/1000 * ms). */
        if (rate_control_param->window_size > 2000) {
            drv_debug_msg(VIDEO_DEBUG_ERROR, "window_size is too much!\n");
            vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
            break;
        }

        /* Check if any none-zero RC parameter is changed*/
        if ((rate_control_param->bits_per_second == 0 ||
             rate_control_param->bits_per_second == ctx->sRCParams.BitsPerSecond) &&
            (rate_control_param->window_size == 0 ||
             ctx->sRCParams.BufferSize == ctx->sRCParams.BitsPerSecond / 1000 * rate_control_param->window_size) &&
            (ctx->sRCParams.MinQP == rate_control_param->min_qp) &&
            (ctx->sRCParams.InitialQp == rate_control_param->initial_qp) &&
            (rate_control_param->basic_unit_size == 0 ||
             ctx->sRCParams.BUSize == rate_control_param->basic_unit_size)) {
            drv_debug_msg(VIDEO_DEBUG_GENERAL, "%s No RC parameter is changed\n",
                          __FUNCTION__);
            break;
        } else if (ctx->raw_frame_count != 0 || ctx->eCodec == IMG_CODEC_H264_VCM)
            ctx->sRCParams.bBitrateChanged = IMG_TRUE;

        /* The initial target bitrate is set by Sequence parameter buffer.
           Here is for changed bitrate only */
        if (rate_control_param->bits_per_second > TOPAZ_H264_MAX_BITRATE) {
            drv_debug_msg(VIDEO_DEBUG_ERROR, " bits_per_second(%d) exceeds \
the maximum bitrate, set it with %d\n",
                          rate_control_param->bits_per_second,
                          TOPAZ_H264_MAX_BITRATE);
            break;
        }

        /* The initial target bitrate is set by Sequence parameter buffer.
           Here is for changed bitrate only */
        if (rate_control_param->bits_per_second != 0 &&
            ctx->raw_frame_count != 0) {
            drv_debug_msg(VIDEO_DEBUG_GENERAL,
                          "bitrate is changed from %d to %d on frame %d\n",
                          ctx->sRCParams.BitsPerSecond,
                          rate_control_param->bits_per_second,
                          ctx->raw_frame_count);

            /* Same Table A-1 min-compression-ratio clip as the sequence path. */
            max_bps = (ctx->Width * ctx->Height * 3 / 2 ) * 8 * ctx->sRCParams.FrameRate;
            if (ctx->Width > 720)
                max_bps /= 4;
            else
                max_bps /= 2;
            drv_debug_msg(VIDEO_DEBUG_GENERAL, " width %d height %d, frame rate %d\n",
                          ctx->Width, ctx->Height, ctx->sRCParams.FrameRate);
            if (rate_control_param->bits_per_second > max_bps) {
                drv_debug_msg(VIDEO_DEBUG_ERROR,
                              "Invalid bitrate %d, violate ITU-T Rec. H.264 (03/2005) A.3.1"
                              "\n clip to %d bps\n",
                              rate_control_param->bits_per_second, max_bps);
                ctx->sRCParams.BitsPerSecond = max_bps;
            } else {
                /* See 110% target bitrate for VCM.
                   Otherwise, the resulted bitrate is much lower than target bitrate */
                if (ctx->eCodec == IMG_CODEC_H264_VCM)
                    rate_control_param->bits_per_second =
                        rate_control_param->bits_per_second / 100 * 110;
                drv_debug_msg(VIDEO_DEBUG_GENERAL, "Bitrate is set to %d\n",
                              rate_control_param->bits_per_second);
                ctx->sRCParams.BitsPerSecond = rate_control_param->bits_per_second;
            }
        }

        if (rate_control_param->min_qp != 0)
            ctx->sRCParams.MinQP = rate_control_param->min_qp;

        if (rate_control_param->window_size != 0) {
            ctx->sRCParams.BufferSize =
                ctx->sRCParams.BitsPerSecond / 1000 * rate_control_param->window_size;
            if (ctx->sRCParams.FrameRate == 0) {
                drv_debug_msg(VIDEO_DEBUG_ERROR, "frame rate can't be zero. Set it to 30");
                ctx->sRCParams.FrameRate = 30;
            }
            frame_size = ctx->sRCParams.BitsPerSecond / ctx->sRCParams.FrameRate;
            if (frame_size == 0) {
                drv_debug_msg(VIDEO_DEBUG_ERROR, "Bitrate is too low %d\n",
                              ctx->sRCParams.BitsPerSecond);
                break;
            }
            /* InitialLevel = 3/16 of buffer, rounded to a frame-size multiple
             * (same derivation as in the sequence-parameter path). */
            ctx->sRCParams.InitialLevel = (3 * ctx->sRCParams.BufferSize) >> 4;
            ctx->sRCParams.InitialLevel += (frame_size / 2);
            ctx->sRCParams.InitialLevel /= frame_size;
            ctx->sRCParams.InitialLevel *= frame_size;
            ctx->sRCParams.InitialDelay = ctx->sRCParams.BufferSize - ctx->sRCParams.InitialLevel;
        }

        if (rate_control_param->initial_qp != 0)
            ctx->sRCParams.InitialQp = rate_control_param->initial_qp;

        if (rate_control_param->basic_unit_size != 0)
            ctx->sRCParams.BUSize = rate_control_param->basic_unit_size;

        drv_debug_msg(VIDEO_DEBUG_GENERAL,
                      "Set Misc parameters(frame %d): window_size %d, initial qp %d\n" \
                      "\tmin qp %d, bunit size %d\n",
                      ctx->raw_frame_count,
                      rate_control_param->window_size,
                      rate_control_param->initial_qp,
                      rate_control_param->min_qp,
                      rate_control_param->basic_unit_size);
        break;

    case VAEncMiscParameterTypeMaxSliceSize:
        max_slice_size_param = (VAEncMiscParameterMaxSliceSize *)pBuffer->data;

        /*The max slice size should not be bigger than 1920x1080x1.5x8 */
        if (max_slice_size_param->max_slice_size > 24883200) {
            drv_debug_msg(VIDEO_DEBUG_ERROR, "Invalid max_slice_size. It should be 1~ 24883200.\n");
            vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
            break;
        }

        if (ctx->max_slice_size == max_slice_size_param->max_slice_size)
            break;

        drv_debug_msg(VIDEO_DEBUG_GENERAL, "max slice size changed to %d\n",
                      max_slice_size_param->max_slice_size);
        ctx->max_slice_size = max_slice_size_param->max_slice_size;
        break;

    case VAEncMiscParameterTypeAIR:
        /* Adaptive Intra Refresh parameters. */
        air_param = (VAEncMiscParameterAIR *)pBuffer->data;

        if (air_param->air_num_mbs > 65535 ||
            air_param->air_threshold > 65535) {
            vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
            break;
        }

        drv_debug_msg(VIDEO_DEBUG_GENERAL, "air slice size changed to num_air_mbs %d "
                      "air_threshold %d, air_auto %d\n",
                      air_param->air_num_mbs, air_param->air_threshold,
                      air_param->air_auto);

        /* Clamp AIR MB count to the total MB count of the picture. */
        if (((ctx->Height * ctx->Width) >> 8) < (int)air_param->air_num_mbs)
            air_param->air_num_mbs = ((ctx->Height * ctx->Width) >> 8);
        if (air_param->air_threshold == 0)
            drv_debug_msg(VIDEO_DEBUG_GENERAL, "%s: air threshold is set to zero\n",
                          __func__);
        ctx->num_air_mbs = air_param->air_num_mbs;
        ctx->air_threshold = air_param->air_threshold;
        //ctx->autotune_air_flag = air_param->air_auto;
        break;

    case VAEncMiscParameterTypeHRD:
        hrd_param = (VAEncMiscParameterHRD *)pBuffer->data;

        if (hrd_param->buffer_size == 0 ||
            hrd_param->initial_buffer_fullness == 0)
            drv_debug_msg(VIDEO_DEBUG_GENERAL,
                          "Find zero value for buffer_size "
                          "and initial_buffer_fullness.\n"
                          "Will assign default value to them later \n");

        /* NOTE(review): suspected bug — this compares the PREVIOUS ctx
         * values while the error message prints the new hrd_param values;
         * it likely should compare hrd_param->initial_buffer_fullness
         * against hrd_param->buffer_size. Verify before changing. */
        if (ctx->initial_buffer_fullness > ctx->buffer_size) {
            drv_debug_msg(VIDEO_DEBUG_ERROR,
                          "initial_buffer_fullnessi(%d) shouldn't be"
                          " larger that buffer_size(%d)!\n",
                          hrd_param->initial_buffer_fullness,
                          hrd_param->buffer_size);
            vaStatus = VA_STATUS_ERROR_INVALID_PARAMETER;
            break;
        }

        if (!ctx->sRCParams.RCEnable) {
            drv_debug_msg(VIDEO_DEBUG_ERROR,
                          "Only when rate control is enabled,"
                          " VAEncMiscParameterTypeHRD will take effect.\n");
            break;
        }

        ctx->buffer_size = hrd_param->buffer_size;
        ctx->initial_buffer_fullness = hrd_param->initial_buffer_fullness;
ctx->bInserHRDParams = IMG_TRUE; drv_debug_msg(VIDEO_DEBUG_GENERAL, "hrd param buffer_size set to %d " "initial buffer fullness set to %d\n", ctx->buffer_size, ctx->initial_buffer_fullness); break; default: vaStatus = VA_STATUS_ERROR_UNKNOWN; DEBUG_FAILURE; break; } free(obj_buffer->buffer_data); obj_buffer->buffer_data = NULL; return vaStatus; } static VAStatus pnw_H264ES_RenderPicture( object_context_p obj_context, object_buffer_p *buffers, int num_buffers) { INIT_CONTEXT_H264ES; VAStatus vaStatus = VA_STATUS_SUCCESS; int i; drv_debug_msg(VIDEO_DEBUG_GENERAL,"pnw_H264ES_RenderPicture\n"); for (i = 0; i < num_buffers; i++) { object_buffer_p obj_buffer = buffers[i]; switch (obj_buffer->type) { case VAEncSequenceParameterBufferType: drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264_RenderPicture got VAEncSequenceParameterBufferType\n"); vaStatus = pnw__H264ES_process_sequence_param(ctx, obj_buffer); DEBUG_FAILURE; break; case VAEncPictureParameterBufferType: drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264_RenderPicture got VAEncPictureParameterBuffer\n"); vaStatus = pnw__H264ES_process_picture_param(ctx, obj_buffer); DEBUG_FAILURE; break; case VAEncSliceParameterBufferType: drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264_RenderPicture got VAEncSliceParameterBufferType\n"); vaStatus = pnw__H264ES_process_slice_param(ctx, obj_buffer); DEBUG_FAILURE; break; case VAEncMiscParameterBufferType: drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264_RenderPicture got VAEncMiscParameterBufferType\n"); vaStatus = pnw__H264ES_process_misc_param(ctx, obj_buffer); DEBUG_FAILURE; break; #if PSB_MFLD_DUMMY_CODE case VAEncPackedHeaderParameterBufferType: drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264_RenderPicture got VAEncPackedHeaderParameterBufferType\n"); vaStatus = pnw__H264ES_insert_SEI_FPA_param(ctx, obj_buffer); DEBUG_FAILURE; break; case VAEncPackedHeaderDataBufferType: drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264_RenderPicture got VAEncPackedHeaderDataBufferType\n"); vaStatus = 
pnw__H264ES_insert_SEI_FPA_data(ctx, obj_buffer); DEBUG_FAILURE; break; #endif default: vaStatus = VA_STATUS_ERROR_UNKNOWN; DEBUG_FAILURE; } if (vaStatus != VA_STATUS_SUCCESS) { break; } } return vaStatus; } static VAStatus pnw_H264ES_EndPicture( object_context_p obj_context) { INIT_CONTEXT_H264ES; pnw_cmdbuf_p cmdbuf = ctx->obj_context->pnw_cmdbuf; PIC_PARAMS *psPicParams = (PIC_PARAMS *)cmdbuf->pic_params_p; VAStatus vaStatus = VA_STATUS_SUCCESS; unsigned char core = 0; drv_debug_msg(VIDEO_DEBUG_GENERAL, "pnw_H264ES_EndPicture\n"); /* Unlike MPEG4 and H263, slices number is defined by user */ for (core = 0; core < ctx->ParallelCores; core++) { psPicParams = (PIC_PARAMS *) (cmdbuf->pic_params_p + ctx->pic_params_size * core); psPicParams->NumSlices = ctx->sRCParams.Slices; } vaStatus = pnw_EndPicture(ctx); return vaStatus; } struct format_vtable_s pnw_H264ES_vtable = { queryConfigAttributes: pnw_H264ES_QueryConfigAttributes, validateConfig: pnw_H264ES_ValidateConfig, createContext: pnw_H264ES_CreateContext, destroyContext: pnw_H264ES_DestroyContext, beginPicture: pnw_H264ES_BeginPicture, renderPicture: pnw_H264ES_RenderPicture, endPicture: pnw_H264ES_EndPicture }; VAStatus pnw_set_frame_skip_flag( object_context_p obj_context) { INIT_CONTEXT_H264ES; VAStatus vaStatus = VA_STATUS_SUCCESS; if (ctx && ctx->previous_src_surface) { SET_SURFACE_INFO_skipped_flag(ctx->previous_src_surface->psb_surface, 1); drv_debug_msg(VIDEO_DEBUG_GENERAL, "Detected a skipped frame for surface 0x%08x.\n", ctx->previous_src_surface->psb_surface); } return vaStatus; }
457088.c
/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Copyright (c) 1980 Regents of the University of California.
 * All rights reserved.  The Berkeley Software License Agreement
 * specifies the terms and conditions for redistribution.
 */

#include "sh.h"
#include <dirent.h>
#include <string.h>
#include "sh.tconst.h"

/*
 * C shell
 *
 * System level search and execute of a command.
 * We look in each directory for the specified command name.
 * If the name contains a '/' then we execute only the full path name.
 * If there is no search path then we execute only full path names.
 */

/*
 * As we search for the command we note the first non-trivial error
 * message for presentation to the user.  This allows us often
 * to show that a file has the wrong mode/no access when the file
 * is not in the last component of the search path, so we must
 * go on after first detecting the error.
 */
char *exerr;			/* Execution error message, shown by pexerr() */

void pexerr(void);
void texec(struct command *, tchar *, tchar **);
void xechoit(tchar **);
void dohash(char []);

static void tconvert(struct command *, tchar *, tchar **);

extern DIR *opendir_(tchar *);

/*
 * Search the path and exec the command described by t.
 * Never returns on success (the process image is replaced);
 * on total failure falls through to pexerr(), which longjmps
 * back to the shell with an error message.
 */
void
doexec(struct command *t)
{
	tchar *sav;
	tchar *dp, **pv, **av;
	struct varent *v;
	bool slash;		/* command name contains a '/' */
	int hashval, hashval1, i;
	tchar *blk[2];

#ifdef TRACE
	tprintf("TRACE- doexec()\n");
#endif

	/*
	 * Glob the command name.  If this does anything, then we
	 * will execute the command only relative to ".".  One special
	 * case: if there is no PATH, then we execute only commands
	 * which start with '/'.
	 */
	dp = globone(t->t_dcom[0]);
	sav = t->t_dcom[0];
	exerr = 0;
	t->t_dcom[0] = dp;
	setname(dp);
	xfree(sav);
	v = adrof(S_path /* "path" */);
	if (v == 0 && dp[0] != '/') {
		/* No path variable and not an absolute name: give up. */
		pexerr();
	}
	slash = gflag;		/* globone() set gflag if globbing occurred */

	/*
	 * Glob the argument list, if necessary.
	 * Otherwise trim off the quote bits.
	 */
	gflag = 0;
	av = &t->t_dcom[1];
	tglob(av);
	if (gflag) {
		av = glob(av);
		if (av == 0)
			error("No match");
	}
	/* Splice the (possibly globbed) command name back onto the args. */
	blk[0] = t->t_dcom[0];
	blk[1] = 0;
	av = blkspl(blk, av);
#ifdef VFORK
	Vav = av;
#endif
	trim(av);

	slash |= any('/', av[0]);

	xechoit(av);		/* Echo command if -x */
	/*
	 * Since all internal file descriptors are set to close on exec,
	 * we don't need to close them explicitly here.  Just reorient
	 * ourselves for error messages.
	 */
	SHIN = 0;
	SHOUT = 1;
	SHDIAG = 2;
	OLDSTD = 0;
	/*
	 * We must do this AFTER any possible forking (like `foo`
	 * in glob) so that this shell can still do subprocesses.
	 */
	(void) sigsetmask(0);
	/*
	 * If no path, no words in path, or a / in the filename
	 * then restrict the command search.
	 */
	if (v == 0 || v->vec[0] == 0 || slash)
		pv = justabs;
	else
		pv = v->vec;
	/* "/" + command name, ready for appending to each path component */
	sav = strspl(S_SLASH /* "/" */, *av);
#ifdef VFORK
	Vsav = sav;
#endif
	if (havhash)
		hashval = hashname(*av);
	i = 0;
#ifdef VFORK
	hits++;
#endif
	do {
		/*
		 * Consult the hash cache: skip directories that we know
		 * (from dohash()) cannot contain this command name.
		 * hashval is only read when havhash is set, so it is
		 * always initialized before use here.
		 */
		if (!slash && pv[0][0] == '/' && havhash) {
			hashval1 = hash(hashval, i);
			if (!bit(xhash, hashval1))
				goto cont;
		}
		/* don't make ./xxx */
		if (pv[0][0] == 0 || eq(pv[0], S_DOT /* "." */)) {
			texec(t, *av, av);
		} else {
			dp = strspl(*pv, sav);
#ifdef VFORK
			Vdp = dp;
#endif
			texec(t, dp, av);
#ifdef VFORK
			Vdp = 0;
#endif
			xfree(dp);
		}
#ifdef VFORK
		misses++;
#endif
cont:
		pv++;
		i++;
	} while (*pv);
#ifdef VFORK
	hits--;
#endif
#ifdef VFORK
	Vsav = 0;
	Vav = 0;
#endif
	xfree(sav);
	xfree((char *)av);
	pexerr();
}

/*
 * Report the execution failure recorded (if any) by texec(),
 * or a generic "Command not found".  bferr() longjmps back to
 * the shell's main loop, so this function never returns.
 */
void
pexerr(void)
{
#ifdef TRACE
	tprintf("TRACE- pexerr()\n");
#endif
	/* Couldn't find the damn thing */
	if (exerr)
		bferr(exerr);
	bferr("Command not found");
}

/*
 * Execute command f, arg list t.
 * Record error message if not found.
 * Also do shell scripts here.
 *
 * Returns normally only when the exec failed; the reason is left
 * in exerr (via the default switch arm) for pexerr() to report.
 */
void
texec(struct command *cmd, tchar *f, tchar **t)
{
	struct varent *v;
	tchar **vp;
	tchar *lastsh[2];

#ifdef TRACE
	tprintf("TRACE- texec()\n");
#endif
	/* convert cfname and cargs from tchar to char */
	tconvert(cmd, f, t);

	execv(cmd->cfname, cmd->cargs);

	/*
	 * exec returned, free up allocations from above
	 * tconvert(), zero cfname and cargs to prevent
	 * duplicate free() in freesyn()
	 */
	xfree(cmd->cfname);
	chr_blkfree(cmd->cargs);
	cmd->cfname = (char *)0;
	cmd->cargs = (char **)0;

	switch (errno) {
	case ENOEXEC:
		/*
		 * Not a recognized executable format: it may be a shell
		 * script.  First check that this is not a binary file
		 * (first byte neither printable nor whitespace).
		 */
		{
			int ff = open_(f, 0);
			tchar ch[MB_LEN_MAX];

			if (ff != -1 && read_(ff, ch, 1) == 1 &&
			    !isprint(ch[0]) && !isspace(ch[0])) {
				printf("Cannot execute binary file.\n");
				Perror(f);
				(void) close(ff);
				unsetfd(ff);
				return;
			}
			(void) close(ff);
			unsetfd(ff);
		}
		/*
		 * If there is an alias for shell, then
		 * put the words of the alias in front of the
		 * argument list replacing the command name.
		 * Note no interpretation of the words at this point.
		 */
		v = adrof1(S_shell /* "shell" */, &aliases);
		if (v == 0) {
#ifdef OTHERSH
			int ff = open_(f, 0);
			tchar ch[MB_LEN_MAX];
#endif

			vp = lastsh;
			vp[0] = adrof(S_shell /* "shell" */) ?
			    value(S_shell /* "shell" */) :
			    S_SHELLPATH /* SHELLPATH */;
			vp[1] = (tchar *) NULL;
#ifdef OTHERSH
			/* Scripts not starting with '#' go to the "other" shell. */
			if (ff != -1 && read_(ff, ch, 1) == 1 && ch[0] != '#')
				vp[0] = S_OTHERSH /* OTHERSH */;
			(void) close(ff);
			unsetfd(ff);
#endif
		} else
			vp = v->vec;
		t[0] = f;
		t = blkspl(vp, t);	/* Splice up the new arglst */
		f = *t;
		tconvert(cmd, f, t);	/* convert tchar to char */
		/*
		 * now done with tchar arg list t,
		 * free the space calloc'd by above blkspl()
		 */
		xfree((char *)t);
		execv(cmd->cfname, cmd->cargs);	/* exec the command */
		/* exec returned, same free'ing as above */
		xfree(cmd->cfname);
		chr_blkfree(cmd->cargs);
		cmd->cfname = (char *)0;
		cmd->cargs = (char **)0;
		/*
		 * The sky is falling, the sky is falling!
		 * FALLTHROUGH — a second exec failure is reported like ENOMEM.
		 */
	case ENOMEM:
		Perror(f);
		/* FALLTHROUGH — Perror() does not return here on ENOMEM paths
		 * that longjmp; otherwise fall into the silent ENOENT case. */
	case ENOENT:
		break;
	default:
		/* Remember the first non-trivial error for pexerr(). */
		if (exerr == 0) {
			exerr = strerror(errno);
			setname(f);
		}
	}
}

/*
 * Fill in cmd->cfname/cmd->cargs with multibyte (char) copies of
 * the tchar command name and argument vector, as required by execv().
 * Caller owns the resulting allocations (freed in texec()/freesyn()).
 */
static void
tconvert(struct command *cmd, tchar *fname, tchar **list)
{
	char **rc;
	int len;

	cmd->cfname = tstostr(NULL, fname);

	len = blklen(list);
	rc = cmd->cargs = (char **)
	    xcalloc((uint_t)(len + 1), sizeof (char **));
	while (len--)
		*rc++ = tstostr(NULL, *list++);
	*rc = NULL;
}

/*
 * Builtin "exec": replace the shell itself with the given command.
 * Saves history and resets the caught signals to the parent's
 * handlers first; if doexec() fails the shell exits (exiterr).
 */
/*ARGSUSED*/
void
execash(tchar **t, struct command *kp)
{
#ifdef TRACE
	tprintf("TRACE- execash()\n");
#endif
	rechist();
	(void) signal(SIGINT, parintr);
	(void) signal(SIGQUIT, parintr);
	(void) signal(SIGTERM, parterm);	/* if doexec loses, screw */
	lshift(kp->t_dcom, 1);
	exiterr++;
	doexec(kp);
	/*NOTREACHED*/
}

/*
 * Echo the argument list to diagnostic output when the shell's
 * "echo" variable (-x) is set.
 */
void
xechoit(tchar **t)
{
#ifdef TRACE
	tprintf("TRACE- xechoit()\n");
#endif
	if (adrof(S_echo /* "echo" */)) {
		flush();
		haderr = 1;
		blkpr(t), Putchar('\n');
		haderr = 0;
	}
}

/*
 * This routine called when user enters "rehash".
 * Both the path and cdpath caching arrays will
 * be rehashed, via calling dohash.  If either
 * variable is not set with a value, then dohash
 * just exits.
 */
void
dorehash(void)
{
	dohash(xhash);
	dohash(xhash2);
}

/*
 * Fill up caching arrays for path and cdpath.
 * cachearray is a bit set: for each absolute directory in the
 * corresponding variable, every entry name is hashed (together with
 * the directory's index) and its bit is set, so doexec() can skip
 * directories that cannot contain a given command.
 */
void
dohash(char cachearray[])
{
	struct stat stb;
	DIR *dirp;
	struct dirent *dp;
	int cnt;
	int i = 0;		/* index of current path component */
	struct varent *v;
	tchar **pv;
	int hashval;
	tchar curdir_[MAXNAMLEN+1];

#ifdef TRACE
	tprintf("TRACE- dohash()\n");
#endif
	/* Caching $path */
	if (cachearray == xhash) {
		havhash = 1;
		v = adrof(S_path /* "path" */);
	} else {	/* Caching $cdpath */
		havhash2 = 1;
		v = adrof(S_cdpath /* "cdpath" */);
	}

	/* Clear the whole bit set before repopulating. */
	for (cnt = 0; cnt < (HSHSIZ / 8); cnt++)
		cachearray[cnt] = 0;

	if (v == 0)
		return;

	for (pv = v->vec; *pv; pv++, i++) {
		if (pv[0][0] != '/')
			continue;	/* only absolute components are cached */
		dirp = opendir_(*pv);
		if (dirp == NULL)
			continue;
		if (fstat(dirp->dd_fd, &stb) < 0 || !isdir(stb)) {
			unsetfd(dirp->dd_fd);
			closedir_(dirp);
			continue;
		}
		while ((dp = readdir(dirp)) != NULL) {
			if (dp->d_ino == 0)
				continue;	/* deleted entry */
			/* skip "." and ".." */
			if (dp->d_name[0] == '.' &&
			    (dp->d_name[1] == '\0' ||
			    dp->d_name[1] == '.' && dp->d_name[2] == '\0'))
				continue;
			hashval = hash(hashname(strtots(curdir_, dp->d_name)), i);
			bis(cachearray, hashval);
		}
		unsetfd(dirp->dd_fd);
		closedir_(dirp);
	}
}

/*
 * Builtin "unhash": disable both caches until the next rehash.
 */
void
dounhash(void)
{
#ifdef TRACE
	tprintf("TRACE- dounhash()\n");
#endif
	havhash = 0;
	havhash2 = 0;
}

#ifdef VFORK
/*
 * Builtin "hashstat": report cache effectiveness counters kept
 * by doexec() in the VFORK build.
 */
void
hashstat(void)
{
#ifdef TRACE
	tprintf("TRACE- hashstat_()\n");
#endif
	if (hits+misses)
		printf("%d hits, %d misses, %d%%\n",
		    hits, misses, 100 * hits / (hits + misses));
}
#endif

/*
 * Hash a command name.
 */
int
hashname(tchar *cp)
{
	long h = 0;

#ifdef TRACE
	tprintf("TRACE- hashname()\n");
#endif
	while (*cp)
		h = hash(h, *cp++);
	return ((int)h);
}
358783.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX // expected-no-diagnostics int also_before(void) { return 1; } #pragma omp begin declare variant match(implementation={vendor(llvm)}) int also_after(void) { return 0; } int also_before(void) { return 0; } #pragma omp end declare variant int also_after(void) { return 2; } int main() { // Should return 0. return (also_after)() + (also_before)() + (&also_after)() + (&also_before)(); } // Make sure: // - we see the specialization in the AST // - we pick the right callees // C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // C-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1> // C-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // C-NEXT: | `-IntegerLiteral 
[[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // C-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // C-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // C-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // C-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // C-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // C-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // C-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 2 // C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)} // C-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:28:1> line:22:5 main 'int ({{.*}})' // C-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:12, line:28:1> // C-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, line:27:25> // C-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <line:24:10, line:27:25> 'int' '+' // C-NEXT: |-BinaryOperator [[ADDR_26:0x[a-z0-9]*]] <line:24:10, line:26:24> 'int' '+' // C-NEXT: | |-BinaryOperator [[ADDR_27:0x[a-z0-9]*]] <line:24:10, line:25:24> 'int' '+' // C-NEXT: | | |-PseudoObjectExpr [[ADDR_28:0x[a-z0-9]*]] <line:24:10, col:23> 'int' // C-NEXT: | | | |-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:23> 'int' // C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | | `-ParenExpr [[ADDR_31:0x[a-z0-9]*]] <col:10, col:21> 'int ({{.*}})' // C-NEXT: | | | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int 
({{.*}})' // C-NEXT: | | | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:10:1, line:24:23> 'int' // C-NEXT: | | | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: | | `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:25:10, col:24> 'int' // C-NEXT: | | |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:10, col:24> 'int' // C-NEXT: | | | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | | `-ParenExpr [[ADDR_38:0x[a-z0-9]*]] <col:10, col:22> 'int ({{.*}})' // C-NEXT: | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // C-NEXT: | | `-CallExpr [[ADDR_40:0x[a-z0-9]*]] <line:13:1, line:25:24> 'int' // C-NEXT: | | `-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // C-NEXT: | `-PseudoObjectExpr [[ADDR_42:0x[a-z0-9]*]] <line:26:10, col:24> 'int' // C-NEXT: | |-CallExpr [[ADDR_43:0x[a-z0-9]*]] <col:10, col:24> 'int' // C-NEXT: | | `-ParenExpr [[ADDR_44:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' // C-NEXT: | | `-UnaryOperator [[ADDR_45:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // C-NEXT: | | `-DeclRefExpr [[ADDR_46:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // C-NEXT: | `-CallExpr [[ADDR_47:0x[a-z0-9]*]] <line:10:1, line:26:24> 'int' // C-NEXT: | `-ImplicitCastExpr [[ADDR_48:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // 
C-NEXT: `-PseudoObjectExpr [[ADDR_49:0x[a-z0-9]*]] <line:27:10, col:25> 'int' // C-NEXT: |-CallExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:25> 'int' // C-NEXT: | `-ParenExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:23> 'int (*)({{.*}})' // C-NEXT: | `-UnaryOperator [[ADDR_52:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // C-NEXT: | `-DeclRefExpr [[ADDR_53:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // C-NEXT: `-CallExpr [[ADDR_54:0x[a-z0-9]*]] <line:13:1, line:27:25> 'int' // C-NEXT: `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // C-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 1 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, 
line:12:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CXX-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // CXX-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // CXX-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // CXX-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // CXX-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // CXX-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // CXX-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 2 // CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)} // CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:28:1> line:22:5 main 'int ({{.*}})' // CXX-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:12, line:28:1> // CXX-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, line:27:25> // CXX-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <line:24:10, line:27:25> 'int' '+' // CXX-NEXT: |-BinaryOperator [[ADDR_26:0x[a-z0-9]*]] <line:24:10, line:26:24> 'int' '+' // CXX-NEXT: | |-BinaryOperator [[ADDR_27:0x[a-z0-9]*]] <line:24:10, line:25:24> 'int' '+' // CXX-NEXT: | | |-PseudoObjectExpr [[ADDR_28:0x[a-z0-9]*]] <line:24:10, col:23> 'int' // CXX-NEXT: | | | |-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:23> 'int' // CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | | `-ParenExpr 
[[ADDR_31:0x[a-z0-9]*]] <col:10, col:21> 'int ({{.*}})' lvalue // CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CXX-NEXT: | | | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:10:1, line:24:23> 'int' // CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: | | `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:25:10, col:24> 'int' // CXX-NEXT: | | |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:10, col:24> 'int' // CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | | `-ParenExpr [[ADDR_38:0x[a-z0-9]*]] <col:10, col:22> 'int ({{.*}})' lvalue // CXX-NEXT: | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:11> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CXX-NEXT: | | `-CallExpr [[ADDR_40:0x[a-z0-9]*]] <line:13:1, line:25:24> 'int' // CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | | `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: | `-PseudoObjectExpr [[ADDR_42:0x[a-z0-9]*]] <line:26:10, col:24> 'int' // CXX-NEXT: | |-CallExpr [[ADDR_43:0x[a-z0-9]*]] <col:10, col:24> 'int' // CXX-NEXT: | | `-ParenExpr [[ADDR_44:0x[a-z0-9]*]] <col:10, col:22> 'int (*)({{.*}})' // CXX-NEXT: | | `-UnaryOperator [[ADDR_45:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // CXX-NEXT: | | `-DeclRefExpr [[ADDR_46:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CXX-NEXT: | `-CallExpr [[ADDR_47:0x[a-z0-9]*]] <line:10:1, line:26:24> 'int' // CXX-NEXT: | 
`-ImplicitCastExpr [[ADDR_48:0x[a-z0-9]*]] <line:10:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CXX-NEXT: `-PseudoObjectExpr [[ADDR_49:0x[a-z0-9]*]] <line:27:10, col:25> 'int' // CXX-NEXT: |-CallExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:25> 'int' // CXX-NEXT: | `-ParenExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:23> 'int (*)({{.*}})' // CXX-NEXT: | `-UnaryOperator [[ADDR_52:0x[a-z0-9]*]] <col:11, col:12> 'int (*)({{.*}})' prefix '&' cannot overflow // CXX-NEXT: | `-DeclRefExpr [[ADDR_53:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CXX-NEXT: `-CallExpr [[ADDR_54:0x[a-z0-9]*]] <line:13:1, line:27:25> 'int' // CXX-NEXT: `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <line:13:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CXX-NEXT: `-DeclRefExpr [[ADDR_5]] <col:1> 'int ({{.*}})' Function [[ADDR_6]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
491100.c
/**********************************************************************/
/*   ____  ____                                                       */
/*  /   /\/   /                                                       */
/* /___/  \  /                                                        */
/* \   \   \/                                                         */
/*  \   \        Copyright (c) 2003-2020 Xilinx, Inc.                 */
/*  /   /        All Right Reserved.                                  */
/* /---/   /\                                                         */
/* \   \  /  \                                                        */
/*  \___\/\___\                                                       */
/**********************************************************************/
/*
 * NOTE(review): This file is auto-generated simulation glue emitted by
 * the Xilinx xsim elaborator for design "shit_reg_8_behav".  It should
 * not be edited by hand; regenerate it with the tool instead.
 */
#if defined(_WIN32)
 #include "stdio.h"
 #define IKI_DLLESPEC __declspec(dllimport)
#else
 #define IKI_DLLESPEC
#endif
#include "iki.h"
#include <string.h>
#include <math.h>
#ifdef __GNUC__
#include <stdlib.h>
#else
#include <malloc.h>
#define alloca _alloca
#endif
/*
 * The generator emits its preamble twice (banner, IKI_DLLESPEC setup and
 * includes); harmless because the #define is identical and the headers
 * are guarded.  Kept byte-for-byte as generated.
 */
/**********************************************************************/
/*   ____  ____                                                       */
/*  /   /\/   /                                                       */
/* /___/  \  /                                                        */
/* \   \   \/                                                         */
/*  \   \        Copyright (c) 2003-2020 Xilinx, Inc.                 */
/*  /   /        All Right Reserved.                                  */
/* /---/   /\                                                         */
/* \   \  /  \                                                        */
/*  \___\/\___\                                                       */
/**********************************************************************/
#if defined(_WIN32)
 #include "stdio.h"
 #define IKI_DLLESPEC __declspec(dllimport)
#else
 #define IKI_DLLESPEC
#endif
#include "iki.h"
#include <string.h>
#include <math.h>
#ifdef __GNUC__
#include <stdlib.h>
#else
#include <malloc.h>
#define alloca _alloca
#endif

/* Signature of the generated per-process execute functions. */
typedef void (*funcp)(char *, char *);

extern int main(int, char**);

/* Generated process entry points, imported from the simulation kernel DLL. */
IKI_DLLESPEC extern void execute_2(char*, char *);
IKI_DLLESPEC extern void execute_4(char*, char *);
IKI_DLLESPEC extern void execute_5(char*, char *);
IKI_DLLESPEC extern void execute_6(char*, char *);
IKI_DLLESPEC extern void execute_7(char*, char *);
IKI_DLLESPEC extern void execute_8(char*, char *);
IKI_DLLESPEC extern void execute_9(char*, char *);
IKI_DLLESPEC extern void execute_10(char*, char *);
IKI_DLLESPEC extern void execute_11(char*, char *);
IKI_DLLESPEC extern void execute_12(char*, char *);
IKI_DLLESPEC extern void execute_13(char*, char *);
IKI_DLLESPEC extern void vlog_transfunc_eventcallback(char*, char*, unsigned, unsigned, unsigned, char *);

/* Relocation table: maps IDs in xsim.reloc to the functions above. */
funcp funcTab[12] = {(funcp)execute_2, (funcp)execute_4, (funcp)execute_5, (funcp)execute_6, (funcp)execute_7, (funcp)execute_8, (funcp)execute_9, (funcp)execute_10, (funcp)execute_11, (funcp)execute_12, (funcp)execute_13, (funcp)vlog_transfunc_eventcallback};
const int NumRelocateId= 12;

/* Bind the function table into the loaded design image. */
void relocate(char *dp)
{

	iki_relocate(dp, "xsim.dir/shit_reg_8_behav/xsim.reloc",  (void **)funcTab, 12);

	/*Populate the transaction function pointer field in the whole net structure */
}

/* Attach sensitivity (trigger) information to the design image. */
void sensitize(char *dp)
{
	iki_sensitize(dp, "xsim.dir/shit_reg_8_behav/xsim.reloc");
}

/* Schedule and run time-zero processes to initialize the design. */
void simulate(char *dp)
{
		iki_schedule_processes_at_time_zero(dp, "xsim.dir/shit_reg_8_behav/xsim.reloc");
	// Initialize Verilog nets in mixed simulation, for the cases when the value at time 0 should be propagated from the mixed language Vhdl net
	iki_execute_processes();

	// Schedule resolution functions for the multiply driven Verilog nets that have strength
	// Schedule transaction functions for the singly driven Verilog nets that have strength

}
#include "iki_bridge.h"
void relocate(char *);
void sensitize(char *);
void simulate(char *);

extern SYSTEMCLIB_IMP_DLLSPEC void local_register_implicit_channel(int, char*);
extern SYSTEMCLIB_IMP_DLLSPEC int xsim_argc_copy ;
extern SYSTEMCLIB_IMP_DLLSPEC char** xsim_argv_copy ;

/*
 * Simulator entry point: initialize the kernel heap, load the compiled
 * design memory image, wire in the callbacks above, and run the
 * simulation to completion.
 */
int main(int argc, char **argv)
{
    iki_heap_initialize("ms", "isimmm", 0, 2147483648) ;
    iki_set_sv_type_file_path_name("xsim.dir/shit_reg_8_behav/xsim.svtype");
    iki_set_crvs_dump_file_path_name("xsim.dir/shit_reg_8_behav/xsim.crvsdump");
    void* design_handle = iki_create_design("xsim.dir/shit_reg_8_behav/xsim.mem", (void *)relocate, (void *)sensitize, (void *)simulate, (void*)0, 0, isimBridge_getWdbWriter(), 0, argc, argv);
     iki_set_rc_trial_count(100);
    (void) design_handle;
    return iki_simulate_design();
}
904578.c
/*
 * UVa-style 3n+1 (Collatz) cycle-length program.
 *
 * For each input pair (i, j), reports i, j (in their original order)
 * followed by the maximum Collatz cycle length over the inclusive
 * range between them.
 */

#include<stdio.h>

/*
 * Collatz cycle length of n: number of terms in the sequence
 * n, f(n), f(f(n)), ..., 1 where f(n) = n/2 for even n and
 * 3n+1 for odd n.  algo(1) == 1.
 */
unsigned long long int algo(unsigned long long int n)
{
    unsigned long long int steps = 1;   /* n itself counts as one term */

    while (n != 1) {
        n = (n % 2 == 1) ? 3 * n + 1 : n / 2;
        steps++;
    }
    return steps;
}

int main()
{
    unsigned long long int i, j;

    /* Process pairs until EOF / malformed input. */
    while (scanf("%llu %llu", &i, &j) == 2) {
        /* Normalize to lo <= hi without disturbing i and j,
         * which must be echoed back in their original order. */
        unsigned long long int lo = (i < j) ? i : j;
        unsigned long long int hi = (i < j) ? j : i;
        unsigned long long int best = 0;
        unsigned long long int k;

        for (k = lo; k <= hi; k++) {
            unsigned long long int len = algo(k);
            if (len > best)
                best = len;
        }
        printf("%llu %llu %llu\n", i, j, best);
    }
    return 0;
}
268274.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &compl_list->list, list) {
		cl = cb->cl;
		/* unlink before dispatch: the completion routines may
		 * re-queue or free the cb */
		list_del(&cb->list);
		if (!cl)
			continue;

		dev_dbg(dev->dev, "completing call back.\n");
		/* the AMT host-interface (iamthif) client has its own
		 * completion path */
		if (cl == &dev->iamthif_cl)
			mei_amthif_complete(dev, cb);
		else
			mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr)
{
	return cl->host_client_id == mei_hdr->host_addr &&
		cl->me_client_id == mei_hdr->me_addr;
}

/**
 * mei_cl_is_reading - checks if the client
 *	is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * Return: true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
	/* addressed to this client, connected, and not already done reading */
	return mei_cl_hbm_equal(cl, mei_hdr) &&
		cl->state == MEI_FILE_CONNECTED &&
		cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: An instance of our list structure
 *
 * Copies the payload of one incoming message into the matching read cb's
 * response buffer (growing it with krealloc if needed).  At most one cb is
 * serviced per call — the loop breaks after the first match.  If no matching
 * reader exists, the payload is drained into dev->rd_msg_buf and discarded.
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_cl_cb *complete_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	/* doubles as the "a reader consumed this message" flag below */
	unsigned char *buffer = NULL;

	list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
		cl = cb->cl;
		if (!cl || !mei_cl_is_reading(cl, mei_hdr))
			continue;

		cl->reading_state = MEI_READING;

		if (cb->response_buffer.size == 0 ||
		    cb->response_buffer.data == NULL) {
			cl_err(dev, cl, "response buffer is not allocated.\n");
			/* NOTE(review): cb is unlinked here but neither
			 * completed nor freed — presumably reclaimed by a
			 * later list flush; confirm against callers. */
			list_del(&cb->list);
			return -ENOMEM;
		}

		if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
			cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
				cb->response_buffer.size,
				mei_hdr->length, cb->buf_idx);
			/* grow the buffer to hold the accumulated payload */
			buffer = krealloc(cb->response_buffer.data,
					  mei_hdr->length + cb->buf_idx,
					  GFP_KERNEL);

			if (!buffer) {
				/* see NOTE(review) above re: unlinked cb */
				list_del(&cb->list);
				return -ENOMEM;
			}
			cb->response_buffer.data = buffer;
			cb->response_buffer.size =
				mei_hdr->length + cb->buf_idx;
		}

		/* append this fragment at the current write offset */
		buffer = cb->response_buffer.data + cb->buf_idx;
		mei_read_slots(dev, buffer, mei_hdr->length);

		cb->buf_idx += mei_hdr->length;
		/* multi-fragment messages complete only on the last header */
		if (mei_hdr->msg_complete) {
			cl->status = 0;
			list_del(&cb->list);
			cl_dbg(dev, cl, "completed read length = %lu\n",
				cb->buf_idx);
			list_add_tail(&cb->list, &complete_list->list);
		}
		/* only the first matching cb is serviced */
		break;
	}

	dev_dbg(dev->dev, "message read\n");
	if (!buffer) {
		/* no reader claimed the message: drain and discard it */
		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
		dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
				MEI_HDR_PRM(mei_hdr));
	}

	return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	slots = mei_hbuf_empty_slots(dev);
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

	/* NOTE(review): int vs u32 comparison — a negative 'slots'
	 * would convert to a huge unsigned value and pass this check;
	 * confirm mei_hbuf_empty_slots() cannot return <0 here. */
	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);

	/* mark disconnected regardless of the send result and retire the cb */
	cl->state = MEI_FILE_DISCONNECTED;
	cl->status = 0;
	list_del(&cb->list);
	mei_io_cb_free(cb);

	return ret;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			    struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	/* see signed/unsigned NOTE(review) in mei_cl_irq_disconnect_rsp() */
	if (slots < msg_slots)
		return -EMSGSIZE;

	if (mei_hbm_cl_disconnect_req(dev, cl)) {
		/* request could not be sent: complete the cb with no data */
		cl->status = 0;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -EIO;
	}

	/* request sent: park the cb on the control-read list and arm the
	 * per-client timeout watched by mei_timer() */
	cl->state = MEI_FILE_DISCONNECTING;
	cl->status = 0;
	cb->buf_idx = 0;
	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;

	return 0;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 *	interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);

	/* NOTE(review): int vs u32 comparison — a negative 'slots' would
	 * convert to a huge unsigned value and pass; confirm upstream. */
	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		/* failed to grant credits: complete the cb with the error */
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}

	/* credits granted: queue the cb to receive the incoming data */
	list_move_tail(&cb->list, &dev->read_list.list);

	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
			      struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	/* another client's connect is in flight: retry on a later pass */
	if (mei_cl_is_other_connecting(cl))
		return 0;

	/* see signed/unsigned NOTE(review) in mei_cl_irq_read() */
	if (slots < msg_slots)
		return -EMSGSIZE;

	cl->state = MEI_FILE_CONNECTING;

	ret = mei_hbm_cl_connect_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_del(&cb->list);
		return ret;
	}

	/* await the connect response; mei_timer() enforces the timeout */
	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	return 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 * handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	/* fetch a fresh header only if the previous one was fully consumed */
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
	}
	/* reinterpret the raw 32-bit header word as a struct mei_msg_hdr */
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message: addresses 0/0 are the bus-management channel */
	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
					ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header
	 * (list_for_each_entry leaves &cl->link at the list head on miss) */
	if (&cl->link == &dev->file_list) {
		dev_err(dev->dev, "no destination client found 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	/* route to the AMT host-interface reader when it owns the message */
	if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
	    MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
	    dev->iamthif_state == MEI_IAMTHIF_READING) {

		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(dev->dev, "mei_amthif_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	} else {
		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	}

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	/* ret is 0 here when reached via a successful reset_slots path */
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);

/**
 * mei_irq_write_handler - dispatch write requests
 *	after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;

	/* host buffer must be free before anything can be written */
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;
		if (cl == NULL)
			continue;

		cl->status = 0;
		list_del(&cb->list);
		if (cb->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&cb->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			cl_dbg(dev, cl, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

	/* watchdog client bookkeeping */
	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up(&dev->wait_stop_wd);
	}

	if (mei_cl_is_connected(&dev->wd_cl)) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			ret = mei_wd_send(dev);
			if (ret)
				return ret;
			dev->wd_pending = false;
		}
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		if (!cl) {
			list_del(&cb->list);
			return -ENODEV;
		}
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			/* a cb on the control list must carry a known op */
			BUG();
		}
	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == NULL)
			continue;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);

/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 * Periodic watchdog for the driver state machines: catches stalled HBM
 * init handshakes, connect/disconnect timeouts, and hung or abandoned
 * AMT host-interface (iamthif) transactions.  Re-arms itself every 2*HZ
 * while the device is not disabled.
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);

	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

	/* an iamthif command never got its response: reset and clear state */
	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(dev->dev, "timer: amthif hanged.\n");
			mei_reset(dev);
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(dev->dev, "timeout = %ld\n", timeout);
		dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */
			dev_dbg(dev->dev, "freeing AMTHI for other requests\n");
			mei_io_list_flush(&dev->amthif_rd_complete_list,
					  &dev->iamthif_cl);
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);
		}
	}
out:
	if (dev->dev_state != MEI_DEV_DISABLED)
		schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}
37249.c
/*
 * ciaaPORT.c
 *
 * SCU pin-mux and GPIO mapping table for the EDU-CIAA-NXP (LPC4337) board.
 *
 *  Created on: Dec 6, 2018
 *      Author: fran
 */

#include "ciaaPORT.h"

// LEDS ***************************************************
// SCU pin definitions for the LEDs:
#define PIN_PORT_LEDR 2 // F4
#define PIN_PORT_LEDG 2 // F4
#define PIN_PORT_LEDB 2 // F4
#define PIN_PORT_LED1 2 // F0
#define PIN_PORT_LED2 2 // F0
#define PIN_PORT_LED3 2 // F0
#define PIN_NUMB_LEDR 0 //gpio5 0
#define PIN_NUMB_LEDG 1 //gpio5 4
#define PIN_NUMB_LEDB 2 //gpio5 2
#define PIN_NUMB_LED1 10 //gpio0 14
#define PIN_NUMB_LED2 11 //gpio1 11
#define PIN_NUMB_LED3 12 //gpio1 12
// GPIO definitions for the GPIO API:
#define GPIO_PORT_LEDR 5
#define GPIO_PORT_LEDG 5
#define GPIO_PORT_LEDB 5
#define GPIO_PORT_LED1 0
#define GPIO_PORT_LED2 1
#define GPIO_PORT_LED3 1
#define GPIO_NUMB_LEDR 0
#define GPIO_NUMB_LEDG 1
#define GPIO_NUMB_LEDB 2
#define GPIO_NUMB_LED1 14
#define GPIO_NUMB_LED2 11
#define GPIO_NUMB_LED3 12
// KEYS ***************************************************
// SCU pin definitions for the push-buttons (TEC1..TEC4):
#define PIN_PORT_TEC1 1 // F0
#define PIN_PORT_TEC2 1 // F0
#define PIN_PORT_TEC3 1 // F0
#define PIN_PORT_TEC4 1 // F0
#define PIN_NUMB_TEC1 0 //gpio0 4
#define PIN_NUMB_TEC2 1 //gpio0 8
#define PIN_NUMB_TEC3 2 //gpio0 9
#define PIN_NUMB_TEC4 6 //gpio1 9
// GPIO definitions for the GPIO API:
#define GPIO_PORT_TEC1 0
#define GPIO_PORT_TEC2 0
#define GPIO_PORT_TEC3 0
#define GPIO_PORT_TEC4 1
#define GPIO_NUMB_TEC1 4
#define GPIO_NUMB_TEC2 8
#define GPIO_NUMB_TEC3 9
#define GPIO_NUMB_TEC4 9
// GPIOS ***************************************************
// SCU pin definitions for the GPIO header pins:
#define PIN_PORT_GPIO0 6 // F0
#define PIN_PORT_GPIO1 6 // F0
#define PIN_PORT_GPIO2 6 // F0
#define PIN_PORT_GPIO3 6 // F0
#define PIN_PORT_GPIO4 6 // F0
#define PIN_PORT_GPIO5 6 // F0
#define PIN_PORT_GPIO6 6 // F0
#define PIN_PORT_GPIO7 6 // F0
#define PIN_PORT_GPIO8 6 // F0
#define PIN_NUMB_GPIO0 1 //gpio3 0
#define PIN_NUMB_GPIO1 4 //gpio3 1 //4 //2
#define PIN_NUMB_GPIO2 5 //gpio3 2 //5 //3
#define PIN_NUMB_GPIO3 7 //gpio3 3 //7 //4
#define PIN_NUMB_GPIO4 8 //gpio3 4 //8 //5
#define PIN_NUMB_GPIO5 9 //gpio3 5
#define PIN_NUMB_GPIO6 10 //gpio3 6
#define PIN_NUMB_GPIO7 11 //gpio3 7
#define PIN_NUMB_GPIO8 12 //gpio2 8
// GPIO definitions for the GPIO API:
#define GPIO_PORT_GPIO0 3
#define GPIO_PORT_GPIO1 3
#define GPIO_PORT_GPIO2 3
#define GPIO_PORT_GPIO3 5 //3 //5
#define GPIO_PORT_GPIO4 5 //3 //5
#define GPIO_PORT_GPIO5 3
#define GPIO_PORT_GPIO6 3
#define GPIO_PORT_GPIO7 3
#define GPIO_PORT_GPIO8 2
#define GPIO_NUMB_GPIO0 0
#define GPIO_NUMB_GPIO1 3 //1 //3
#define GPIO_NUMB_GPIO2 4 //2 //4
#define GPIO_NUMB_GPIO3 15 //3 //15
#define GPIO_NUMB_GPIO4 16 //4 //16
#define GPIO_NUMB_GPIO5 5
#define GPIO_NUMB_GPIO6 6
#define GPIO_NUMB_GPIO7 7
#define GPIO_NUMB_GPIO8 8
//****************************************************
// SCU pin definitions for the remaining pins (SPI / LCD):
#define PIN_PORT_SPI_MISO 1 // F0
#define PIN_PORT_SPI_MOSI 1 // F0
#define PIN_PORT_SPI_SCK 0xF // NOT AVAILABLE
#define PIN_PORT_LCD_EN 4 // F4
#define PIN_PORT_LCD_RS 4 // F4
#define PIN_PORT_LCD1 4 // F0
#define PIN_PORT_LCD2 4 // F0
#define PIN_PORT_LCD3 4 // F0
#define PIN_PORT_LCD4 4 // F4
#define PIN_NUMB_SPI_MISO 3 //gpio0 10
#define PIN_NUMB_SPI_MOSI 4 //gpio0 11
#define PIN_NUMB_SPI_SCK 4
#define PIN_NUMB_LCD_EN 9 //gpio5 13
#define PIN_NUMB_LCD_RS 8 //gpio5 12
#define PIN_NUMB_LCD1 4 //gpio2 4
#define PIN_NUMB_LCD2 5 //gpio2 5
#define PIN_NUMB_LCD3 6 //gpio2 6
#define PIN_NUMB_LCD4 10 //gpio5 14
// GPIO definitions for the GPIO API:
#define GPIO_PORT_SPI_MISO 0
#define GPIO_PORT_SPI_MOSI 0
#define GPIO_PORT_LCD_EN 5
#define GPIO_PORT_LCD_RS 5
#define GPIO_PORT_LCD1 2
#define GPIO_PORT_LCD2 2
#define GPIO_PORT_LCD3 2
#define GPIO_PORT_LCD4 5
#define GPIO_NUMB_SPI_MISO 10
#define GPIO_NUMB_SPI_MOSI 11
#define GPIO_NUMB_LCD_EN 13
#define GPIO_NUMB_LCD_RS 12
#define GPIO_NUMB_LCD1 4
#define GPIO_NUMB_LCD2 5
#define GPIO_NUMB_LCD3 6
#define GPIO_NUMB_LCD4 14
// CFG PARAMETERS *****************************************
#define PIN_FUNCION_0 0
#define PIN_FUNCION_4 4

// Master pin-configuration table.
// Row layout: { SCU port, SCU pin, SCU function, GPIO port, GPIO bit }
const gpioCFG_t pin_config[] = {
	{PIN_PORT_LEDR, PIN_NUMB_LEDR, PIN_FUNCION_4, GPIO_PORT_LEDR, GPIO_NUMB_LEDR },
	{PIN_PORT_LEDG, PIN_NUMB_LEDG, PIN_FUNCION_4, GPIO_PORT_LEDG, GPIO_NUMB_LEDG },
	{PIN_PORT_LEDB, PIN_NUMB_LEDB, PIN_FUNCION_4, GPIO_PORT_LEDB, GPIO_NUMB_LEDB },
	{PIN_PORT_LED1, PIN_NUMB_LED1, PIN_FUNCION_0, GPIO_PORT_LED1, GPIO_NUMB_LED1 },
	{PIN_PORT_LED2, PIN_NUMB_LED2, PIN_FUNCION_0, GPIO_PORT_LED2, GPIO_NUMB_LED2 },
	{PIN_PORT_LED3, PIN_NUMB_LED3, PIN_FUNCION_0, GPIO_PORT_LED3, GPIO_NUMB_LED3 },
	{PIN_PORT_TEC1, PIN_NUMB_TEC1, PIN_FUNCION_0, GPIO_PORT_TEC1, GPIO_NUMB_TEC1 },
	{PIN_PORT_TEC2, PIN_NUMB_TEC2, PIN_FUNCION_0, GPIO_PORT_TEC2, GPIO_NUMB_TEC2 },
	{PIN_PORT_TEC3, PIN_NUMB_TEC3, PIN_FUNCION_0, GPIO_PORT_TEC3, GPIO_NUMB_TEC3 },
	{PIN_PORT_TEC4, PIN_NUMB_TEC4, PIN_FUNCION_0, GPIO_PORT_TEC4, GPIO_NUMB_TEC4 },
	{PIN_PORT_GPIO0, PIN_NUMB_GPIO0, PIN_FUNCION_0, GPIO_PORT_GPIO0, GPIO_NUMB_GPIO0 },
	{PIN_PORT_GPIO1, PIN_NUMB_GPIO1, PIN_FUNCION_0, GPIO_PORT_GPIO1, GPIO_NUMB_GPIO1 },
	{PIN_PORT_GPIO2, PIN_NUMB_GPIO2, PIN_FUNCION_0, GPIO_PORT_GPIO2, GPIO_NUMB_GPIO2 },
	{PIN_PORT_GPIO3, PIN_NUMB_GPIO3, PIN_FUNCION_0, GPIO_PORT_GPIO3, GPIO_NUMB_GPIO3 },
	{PIN_PORT_GPIO4, PIN_NUMB_GPIO4, PIN_FUNCION_0, GPIO_PORT_GPIO4, GPIO_NUMB_GPIO4 },
	{PIN_PORT_GPIO5, PIN_NUMB_GPIO5, PIN_FUNCION_0, GPIO_PORT_GPIO5, GPIO_NUMB_GPIO5 },
	{PIN_PORT_GPIO6, PIN_NUMB_GPIO6, PIN_FUNCION_0, GPIO_PORT_GPIO6, GPIO_NUMB_GPIO6 },
	{PIN_PORT_GPIO7, PIN_NUMB_GPIO7, PIN_FUNCION_0, GPIO_PORT_GPIO7, GPIO_NUMB_GPIO7 },
	{PIN_PORT_GPIO8, PIN_NUMB_GPIO8, PIN_FUNCION_0, GPIO_PORT_GPIO8, GPIO_NUMB_GPIO8 },
	{PIN_PORT_LCD_EN, PIN_NUMB_LCD_EN, PIN_FUNCION_4, GPIO_PORT_LCD_EN, GPIO_NUMB_LCD_EN },
	{PIN_PORT_LCD_RS, PIN_NUMB_LCD_RS, PIN_FUNCION_4, GPIO_PORT_LCD_RS, GPIO_NUMB_LCD_RS },
	{PIN_PORT_LCD1, PIN_NUMB_LCD1, PIN_FUNCION_0, GPIO_PORT_LCD1, GPIO_NUMB_LCD1 },
	{PIN_PORT_LCD2, PIN_NUMB_LCD2, PIN_FUNCION_0, GPIO_PORT_LCD2, GPIO_NUMB_LCD2 },
	{PIN_PORT_LCD3, PIN_NUMB_LCD3, PIN_FUNCION_0, GPIO_PORT_LCD3, GPIO_NUMB_LCD3 },
	{PIN_PORT_LCD4, PIN_NUMB_LCD4, PIN_FUNCION_4, GPIO_PORT_LCD4, GPIO_NUMB_LCD4 },
	{PIN_PORT_SPI_MISO, PIN_NUMB_SPI_MISO, PIN_FUNCION_0, GPIO_PORT_SPI_MISO, GPIO_NUMB_SPI_MISO },
	// Placeholder row: SCK is NOT available as a GPIO — it reuses the
	// LCD3 GPIO fields only to keep the table rectangular. DO NOT USE!!
	{PIN_PORT_SPI_SCK, PIN_NUMB_SPI_SCK, PIN_FUNCION_0, GPIO_PORT_LCD3, GPIO_NUMB_LCD3 },
	{PIN_PORT_SPI_MOSI, PIN_NUMB_SPI_MOSI, PIN_FUNCION_0, GPIO_PORT_SPI_MOSI, GPIO_NUMB_SPI_MOSI },
};
540239.c
/** @file
  C Run-Time Libraries (CRT) Time Management Routines Wrapper Implementation
  for OpenSSL-based Cryptographic Library (used in DXE & RUNTIME).

Copyright (c) 2010 - 2017, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include <CrtLibSupport.h>

//
// -- Time Management Routines --
//

// Gregorian leap-year rule: divisible by 4, except centuries not divisible by 400.
#define IsLeap(y)   (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0))
#define SECSPERMIN  (60)
#define SECSPERHOUR (60 * 60)
#define SECSPERDAY  (24 * SECSPERHOUR)

//
// The arrays give the cumulative number of days up to the first of the
// month number used as the index (1 -> 12) for regular and leap years.
// The value at index 13 is for the whole year.
// (Index 0 is unused padding so that month numbers can index directly.)
//
UINTN CumulativeDays[2][14] = {
  {
    0,
    0,
    31,
    31 + 28,
    31 + 28 + 31,
    31 + 28 + 31 + 30,
    31 + 28 + 31 + 30 + 31,
    31 + 28 + 31 + 30 + 31 + 30,
    31 + 28 + 31 + 30 + 31 + 30 + 31,
    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
    31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31
  },
  {
    0,
    0,
    31,
    31 + 29,
    31 + 29 + 31,
    31 + 29 + 31 + 30,
    31 + 29 + 31 + 30 + 31,
    31 + 29 + 31 + 30 + 31 + 30,
    31 + 29 + 31 + 30 + 31 + 30 + 31,
    31 + 29 + 31 + 30 + 31 + 30 + 31 + 31,
    31 + 29 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
    31 + 29 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
    31 + 29 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
    31 + 29 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31
  }
};

/* Get the system time as seconds elapsed since midnight, January 1, 1970.
*/ //INTN time( // INTN *timer // ) time_t time (time_t *timer) { EFI_TIME Time; time_t CalTime; UINTN Year; // // Get the current time and date information // uefi_call_wrapper(RT->GetTime, 2, &Time, NULL); // // Years Handling // UTime should now be set to 00:00:00 on Jan 1 of the current year. // for (Year = 1970, CalTime = 0; Year != Time.Year; Year++) { CalTime = CalTime + (time_t)(CumulativeDays[IsLeap(Year)][13] * SECSPERDAY); } // // Add in number of seconds for current Month, Day, Hour, Minute, Seconds, and TimeZone adjustment // CalTime = CalTime + (time_t)((Time.TimeZone != EFI_UNSPECIFIED_TIMEZONE) ? (Time.TimeZone * 60) : 0) + (time_t)(CumulativeDays[IsLeap(Time.Year)][Time.Month] * SECSPERDAY) + (time_t)(((Time.Day > 0) ? Time.Day - 1 : 0) * SECSPERDAY) + (time_t)(Time.Hour * SECSPERHOUR) + (time_t)(Time.Minute * 60) + (time_t)Time.Second; if (timer != NULL) { *timer = CalTime; } return CalTime; } // // Convert a time value from type time_t to struct tm. // struct tm * gmtime (const time_t *timer) { struct tm *GmTime; UINT16 DayNo; UINT16 DayRemainder; time_t Year; time_t YearNo; UINT16 TotalDays; UINT16 MonthNo; if (timer == NULL) { return NULL; } GmTime = malloc (sizeof (struct tm)); if (GmTime == NULL) { return NULL; } ZeroMem ((VOID *) GmTime, (UINTN) sizeof (struct tm)); DayNo = (UINT16) (*timer / SECSPERDAY); DayRemainder = (UINT16) (*timer % SECSPERDAY); GmTime->tm_sec = (int) (DayRemainder % SECSPERMIN); GmTime->tm_min = (int) ((DayRemainder % SECSPERHOUR) / SECSPERMIN); GmTime->tm_hour = (int) (DayRemainder / SECSPERHOUR); GmTime->tm_wday = (int) ((DayNo + 4) % 7); for (Year = 1970, YearNo = 0; DayNo > 0; Year++) { TotalDays = (UINT16) (IsLeap (Year) ? 
366 : 365); if (DayNo >= TotalDays) { DayNo = (UINT16) (DayNo - TotalDays); YearNo++; } else { break; } } GmTime->tm_year = (int) (YearNo + (1970 - 1900)); GmTime->tm_yday = (int) DayNo; for (MonthNo = 12; MonthNo > 1; MonthNo--) { if (DayNo >= CumulativeDays[IsLeap(Year)][MonthNo]) { DayNo = (UINT16) (DayNo - (UINT16) (CumulativeDays[IsLeap(Year)][MonthNo])); break; } } GmTime->tm_mon = (int) MonthNo - 1; GmTime->tm_mday = (int) DayNo + 1; GmTime->tm_isdst = 0; GmTime->tm_gmtoff = 0; GmTime->tm_zone = NULL; return GmTime; }
106715.c
/* * module.c - module implementation * * Copyright (c) 2000-2020 Shiro Kawai <shiro@acm.org> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the authors nor the names of its contributors * may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define LIBGAUCHE_BODY #include "gauche.h" #include "gauche/class.h" #include "gauche/priv/builtin-syms.h" #include "gauche/priv/moduleP.h" /* * Modules * * A module maps symbols to global locations (GLOCs). * The mapping is resolved at the compile time. * Scheme's current-module is therefore a syntax, instead of * a procedure, to capture compile-time information. 
* * Each module has two hashtables; the 'internal' table keeps all the * bindings in the module, while the 'external' table keeps only the * bindings that are exported. In most cases, the latter is a subset * of the former. If a binding is renamed on export, however, * two tables map different symbols on the same GLOC. * * Modules are registered to a global hash table using their names * as keys, so that the module is retrieved by its name. The exception * is "anonymous modules", which have #f as the name field * and not registered in the global table. Anonymous modules are especially * useful for certain applications that need temporary, segregated * namespace---for example, a 'sandbox' environment to evaluate an * expression sent over the network during a session. * The anonymous namespace will be garbage-collected if nobody references * it, recovering its resources. */ /* Note on mutex of module operation * * Each module used to have a mutex for accesses to it. I changed it * to use a single global lock (modules.mutex), based on the following * observations: * * - Profiling showed mutex_lock was taking around 10% of program loading * phase in the previous version. * * - Module operations almost always occur during program loading and * interactive session. Having giant lock for module operations won't * affect normal runtime performance. * * Benchmark showed the change made program loading 30% faster. */ /* Special treatment of keyword modules. * We need to achieve two goals: * * (1) For ordinary Gauche programs (modules that uses implicit inheritance * of #<module gauche>), we want to see all keywords being bound to * itself by default. It can be achieved by inheriting a module that * has such keyword bindings. * (2) For R7RS programs we want to see the default keyword bindings only * when the programmer explicitly asks so - notably, by importing some * special module. 
   It can be achieved by a module that has all keywords
 * each bound to itself, *and* exports all of such bindings.
 *
 * It turned out we can't use one 'keyword' module for both purpose; if
 * we have a keyword module that exports all bindings, they are automatically
 * exported from modules that inherits them.  This means if a R7RS program
 * imports any of Gauche modules, it carries all of keyword bindings.  It's
 * not only nasty, but also dangerous for it can shadow bindings to the
 * symbols starting with a colon inadvertently.
 *
 * So we have two modules, #<module gauche.keyword> and
 * #<module keyword>.  The former have export-all flag, and to be imported
 * from R7RS programs as needed.  The latter doesn't export anything, and
 * to be inherited to Gauche modules by default.  Whenever a keyword
 * is created, its default binding is inserted to both - actually,
 * to prevent two modules from being out of sync, we specially wire them
 * to share a single hashtable for their internal bindings.
 */

/* Note on gauche.require-base module
   It is an immutable module to which 'require' loads code.
   We need a base module where 'define-module' and 'define-library' are
   visible in order for requiring modules using them to work, so
   loading into the current module won't cut it.  However, we don't want to
   use a specific mutable module (such as #<module gauche>) as a base, since
   if the required module has toplevel defines without switching the module,
   it will modify the base module.
   By using immutable module as a base, we can reject the latter case;
   requiring a code that inserts toplevel binding without specifying a
   module is simply a bad idea and shouldn't be allowed.
 */

/* Global module table.  All named modules are interned here. */
static struct {
    ScmHashTable *table;    /* Maps name -> module. */
    ScmInternalMutex mutex; /* Lock for table.  Only register_module and
                               lookup_module may hold the lock. */
} modules;

/* Predefined modules - slots will be initialized by Scm__InitModule */
#define DEFINE_STATIC_MODULE(cname) \
    static ScmModule cname;

DEFINE_STATIC_MODULE(nullModule);     /* #<module null> */
DEFINE_STATIC_MODULE(schemeModule);   /* #<module scheme> */
DEFINE_STATIC_MODULE(gaucheModule);   /* #<module gauche> */
DEFINE_STATIC_MODULE(internalModule); /* #<module gauche.internal> */
DEFINE_STATIC_MODULE(gfModule);       /* #<module gauche.gf> */
DEFINE_STATIC_MODULE(userModule);     /* #<module user> */
DEFINE_STATIC_MODULE(keywordModule);  /* #<module keyword> */
DEFINE_STATIC_MODULE(gkeywordModule); /* #<module gauche.keyword> */
DEFINE_STATIC_MODULE(reqbaseModule);  /* #<module gauche.require-base> */

static ScmObj defaultParents = SCM_NIL; /* will be initialized */
static ScmObj defaultMpl =     SCM_NIL; /* will be initialized */

/*----------------------------------------------------------------------
 * Constructor
 */

/* Fill in the fields of module M.  If INTERNAL is non-NULL, it is used
   as the internal binding table (used to wire keyword & gauche.keyword
   to share one table); otherwise a fresh eq-hashtable is made. */
static void init_module(ScmModule *m, ScmObj name, ScmHashTable *internal)
{
    m->name = name;
    m->imported = m->depended = SCM_NIL;
    m->exportAll = FALSE;
    m->parents = defaultParents;
    m->mpl = Scm_Cons(SCM_OBJ(m), defaultMpl);
    if (internal) {
        m->internal = internal;
    } else {
        m->internal = SCM_HASH_TABLE(Scm_MakeHashTableSimple(SCM_HASH_EQ, 0));
    }
    m->external = SCM_HASH_TABLE(Scm_MakeHashTableSimple(SCM_HASH_EQ, 0));
    m->origin = m->prefix = SCM_FALSE;
    m->sealed = FALSE;
    m->placeholding = FALSE;
}

/* Internal.  Allocate and initialize a module object.  The module is
   NOT registered in the global table here. */
static ScmObj make_module(ScmObj name, ScmHashTable *internal)
{
    ScmModule *m = SCM_NEW(ScmModule);
    SCM_SET_CLASS(m, SCM_CLASS_MODULE);
    init_module(m, name, internal);
    return SCM_OBJ(m);
}

/* Internal.  Lookup module with name N from the table.
   Returns NULL if no module is registered under NAME. */
static ScmModule *lookup_module(ScmSymbol *name)
{
    (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex);
    ScmObj v = Scm_HashTableRef(modules.table, SCM_OBJ(name), SCM_UNBOUND);
    (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex);
    if (SCM_UNBOUNDP(v)) return NULL;
    else return SCM_MODULE(v);
}

/* Internal.  Lookup module, and if there's none, create one.
   *CREATED is set to TRUE iff a new module was made by this call. */
static ScmModule *lookup_module_create(ScmSymbol *name, int *created)
{
    (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex);
    ScmDictEntry *e = Scm_HashCoreSearch(SCM_HASH_TABLE_CORE(modules.table),
                                         (intptr_t)name,
                                         SCM_DICT_CREATE);
    if (e->value == 0) {
        (void)SCM_DICT_SET_VALUE(e, make_module(SCM_OBJ(name), NULL));
        *created = TRUE;
    } else {
        *created = FALSE;
    }
    (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex);
    return SCM_MODULE(e->value);
}

/* Create a module named NAME.  NAME == NULL makes an anonymous,
   unregistered module.  If a module of the same name already exists,
   either raise an error (ERROR_IF_EXISTS) or return SCM_FALSE. */
ScmObj Scm_MakeModule(ScmSymbol *name, int error_if_exists)
{
    if (name == NULL) {
        return make_module(SCM_FALSE, NULL);
    }
    int created;
    ScmObj r = SCM_OBJ(lookup_module_create(name, &created));
    if (!created) {
        if (error_if_exists) {
            Scm_Error("couldn't create module '%S': named module already exists",
                      SCM_OBJ(name));
        }
        return SCM_FALSE;
    }
    return r;
}

/* internal API to create an anonymous wrapper module.  The wrapper
   delegates to ORIGIN (or ORIGIN's ultimate origin, if ORIGIN is itself
   a wrapper) and applies PREFIX to looked-up symbols. */
ScmObj Scm__MakeWrapperModule(ScmModule *origin, ScmObj prefix)
{
    ScmModule *m = SCM_MODULE(make_module(SCM_FALSE, NULL));
    m->parents = SCM_LIST1(SCM_OBJ(origin));
    m->mpl = Scm_Cons(SCM_OBJ(m), origin->mpl);
    m->prefix = prefix;
    while (SCM_MODULEP(origin->origin)) {
        origin = SCM_MODULE(origin->origin);
    }
    m->origin = SCM_OBJ(origin);
    return SCM_OBJ(m);
}

/* Common code to report error on sealed module.
   One common error is to try to 'use' or 'require' a file that doesn't
   have module definitions in it.  The default error message in that case
   is perplexing, so we use more helpful message in that case.
 */
static void err_sealed(ScmObj source, ScmModule *target)
{
    const char *what = "";
    if (SCM_MODULEP(source)) what = "import a module";
    else                     what = "create a binding";

    if (target == Scm__RequireBaseModule()) {
        /* NOTE(review): this format string has two directives (%s, %S) but
           three arguments are passed (what, source, SCM_OBJ(target)) —
           the trailing argument appears unused; confirm against
           Scm_Error's varargs semantics. */
        Scm_Error("Attempted to %s (%S) into gauche.require-base. "
                  "This may be caused by trying to 'use' or 'require' a file"
                  " in which no module is defined. Make sure the file has"
                  " define-module/select-module or define-library at the"
                  " beginning.",
                  what, source, SCM_OBJ(target));
    } else {
        Scm_Error("Attempted to %s (%S) in a sealed module: %S",
                  what, source, SCM_OBJ(target));
    }
}

/*----------------------------------------------------------------------
 * Finding and modifying bindings
 */

#define SEARCHED_ARRAY_SIZE  64

/* Keep record of searched modules.  we use stack array for small # of
   modules, in order to avoid consing for typical cases. */
typedef struct {
    int num_searched;
    ScmObj searched[SEARCHED_ARRAY_SIZE]; /* fast path: linear scan */
    ScmObj more_searched;                 /* overflow: list */
} module_cache;

static inline void init_module_cache(module_cache *c)
{
    c->num_searched = 0;
    c->more_searched = SCM_NIL;
}

/* TRUE iff module M has already been recorded in cache C. */
static inline int module_visited_p(module_cache *c, ScmModule *m)
{
    for (int i=0; i<c->num_searched; i++) {
        if (SCM_EQ(SCM_OBJ(m), c->searched[i])) return TRUE;
    }
    if (!SCM_NULLP(c->more_searched)) {
        if (!SCM_FALSEP(Scm_Memq(SCM_OBJ(m), c->more_searched))) return TRUE;
    }
    return FALSE;
}

/* Record module M in cache C, spilling to a list when the array fills. */
static inline void module_add_visited(module_cache *c, ScmModule *m)
{
    if (c->num_searched < SEARCHED_ARRAY_SIZE) {
        c->searched[c->num_searched++] = SCM_OBJ(m);
    } else {
        c->more_searched = Scm_Cons(SCM_OBJ(m), c->more_searched);
    }
}

/* The main logic of global binding search.  We factored this out since
   we need recursive searching in case of phantom binding (see gloc.h
   about phantom bindings).  The flags stay_in_module and external_only
   corresponds to the flags passed to Scm_FindBinding.  The exclude_self
   flag is only used in recursive search. */
static ScmGloc *search_binding(ScmModule *module, ScmSymbol *symbol,
                               int stay_in_module, int external_only,
                               int exclude_self)
{
    module_cache searched;
    init_module_cache(&searched);

    /* First, search from the specified module.  In this phase, we just ignore
       phantom bindings, for we'll search imported bindings later anyway. */
    if (!exclude_self) {
        ScmObj v = Scm_HashTableRef(
            external_only ?
            module->external : module->internal,
            SCM_OBJ(symbol), SCM_FALSE);
        if (SCM_GLOCP(v)) {
            if (SCM_GLOC_PHANTOM_BINDING_P(SCM_GLOC(v))) {
                /* If we're here, the symbol is external to MODULE but
                   the real GLOC is somewhere in imported or inherited
                   modules.  We turn off external_only switch so that
                   when we search inherited modules we look into it's
                   internal bindings. */
                external_only = FALSE;
                symbol = SCM_GLOC(v)->name; /* in case it's renamed on export */
            } else {
                return SCM_GLOC(v);
            }
        }
        if (stay_in_module) return NULL;
        module_add_visited(&searched, module);
    }

    ScmObj p, mp;
    /* Next, search from imported modules
       If the import is prefixed, we avoid caching the result. */
    SCM_FOR_EACH(p, module->imported) {
        ScmObj elt = SCM_CAR(p);
        ScmObj sym = SCM_OBJ(symbol);
        int prefixed = FALSE;

        SCM_ASSERT(SCM_MODULEP(elt));
        SCM_FOR_EACH(mp, SCM_MODULE(elt)->mpl) {
            ScmGloc *g;

            SCM_ASSERT(SCM_MODULEP(SCM_CAR(mp)));
            ScmModule *m = SCM_MODULE(SCM_CAR(mp));
            if (!prefixed && module_visited_p(&searched, m)) continue;
            if (SCM_SYMBOLP(m->prefix)) {
                /* Strip the wrapper's prefix; a symbol without the prefix
                   can't come from this import at all. */
                sym = Scm_SymbolSansPrefix(SCM_SYMBOL(sym),
                                           SCM_SYMBOL(m->prefix));
                if (!SCM_SYMBOLP(sym)) break;
                prefixed = TRUE;
            }

            ScmObj v = Scm_HashTableRef(m->external, SCM_OBJ(sym), SCM_FALSE);
            if (SCM_GLOCP(v)) {
                g = SCM_GLOC(v);
                if (g->hidden) break; /* binding explicitly hidden (:except) */
                if (SCM_GLOC_PHANTOM_BINDING_P(g)) {
                    /* Recurse to find the real gloc behind the phantom. */
                    g = search_binding(m, g->name, FALSE, FALSE, TRUE);
                    if (g) return g;
                } else {
                    return g;
                }
            }
            if (!prefixed) module_add_visited(&searched, m);
        }
    }

    /* Then, search from parent modules */
    SCM_ASSERT(SCM_PAIRP(module->mpl));
    SCM_FOR_EACH(mp, SCM_CDR(module->mpl)) {
        SCM_ASSERT(SCM_MODULEP(SCM_CAR(mp)));
        ScmModule *m = SCM_MODULE(SCM_CAR(mp));

        if (SCM_SYMBOLP(m->prefix)) {
            ScmObj sym = Scm_SymbolSansPrefix(symbol, SCM_SYMBOL(m->prefix));
            if (!SCM_SYMBOLP(sym)) return NULL;
            symbol = SCM_SYMBOL(sym);
        }
        ScmObj v = Scm_HashTableRef(external_only?m->external:m->internal,
                                    SCM_OBJ(symbol), SCM_FALSE);
        if (SCM_GLOCP(v)) {
            if (SCM_GLOC_PHANTOM_BINDING_P(SCM_GLOC(v))) {
                symbol = SCM_GLOC(v)->name; /* in case it's renamed on export */
                ScmGloc *g = search_binding(m, symbol, FALSE, FALSE, TRUE);
                if (g) return g;
                external_only = FALSE; /* See above comment */
            } else {
                return SCM_GLOC(v);
            }
        }
    }
    return NULL;
}

/* Find the gloc bound to SYMBOL, looking from MODULE.  FLAGS may contain
   SCM_BINDING_STAY_IN_MODULE (don't follow imports/parents) and/or
   SCM_BINDING_EXTERNAL (only look at exported bindings).
   Returns NULL if there's no visible binding. */
ScmGloc *Scm_FindBinding(ScmModule *module, ScmSymbol *symbol, int flags)
{
    int stay_in_module = flags&SCM_BINDING_STAY_IN_MODULE;
    int external_only = flags&SCM_BINDING_EXTERNAL;
    ScmGloc *gloc = NULL;

    SCM_INTERNAL_MUTEX_SAFE_LOCK_BEGIN(modules.mutex);
    gloc = search_binding(module, symbol, stay_in_module, external_only, FALSE);
    SCM_INTERNAL_MUTEX_SAFE_LOCK_END();
    return gloc;
}

/* Returns the value globally bound to SYMBOL seen from MODULE,
   or SCM_UNBOUND.  Autoloads are resolved on the way. */
ScmObj Scm_GlobalVariableRef(ScmModule *module, ScmSymbol *symbol, int flags)
{
    ScmGloc *g = Scm_FindBinding(module, symbol, flags);

    if (g == NULL) return SCM_UNBOUND;
    ScmObj val = SCM_GLOC_GET(g);
    if (SCM_AUTOLOADP(val)) {
        /* NB: Scm_ResolveAutoload may return SCM_UNBOUND */
        val = Scm_ResolveAutoload(SCM_AUTOLOAD(val), 0);
    }
    return val;
}

/*
 * Definition.
 */

/* Create (or update) the binding of SYMBOL in MODULE to VALUE.
   FLAGS may mark the binding const or inlinable; redefinition of such a
   binding with a different value triggers a warning. */
ScmGloc *Scm_MakeBinding(ScmModule *module, ScmSymbol *symbol,
                         ScmObj value, int flags)
{
    if (module->sealed) err_sealed(SCM_OBJ(symbol), module);

    ScmGloc *g;
    ScmObj oldval = SCM_UNDEFINED;
    int prev_kind = 0;
    int kind = ((flags&SCM_BINDING_CONST)
                ? SCM_BINDING_CONST
                : ((flags&SCM_BINDING_INLINABLE)
                   ? SCM_BINDING_INLINABLE
                   : 0));

    SCM_INTERNAL_MUTEX_SAFE_LOCK_BEGIN(modules.mutex);
    ScmObj v = Scm_HashTableRef(module->internal, SCM_OBJ(symbol), SCM_FALSE);
    /* NB: this function bypasses check of gloc setter */
    if (SCM_GLOCP(v)) {
        g = SCM_GLOC(v);
        if (Scm_GlocConstP(g))          prev_kind = SCM_BINDING_CONST;
        else if (Scm_GlocInlinableP(g)) prev_kind = SCM_BINDING_INLINABLE;
        oldval = g->value;
    } else {
        g = SCM_GLOC(Scm_MakeGloc(symbol, module));
        Scm_HashTableSet(module->internal, SCM_OBJ(symbol), SCM_OBJ(g), 0);
        /* If module is marked 'export-all', export this binding by default */
        if (module->exportAll && SCM_SYMBOL_INTERNED(symbol)) {
            Scm_HashTableSet(module->external, SCM_OBJ(symbol), SCM_OBJ(g), 0);
        }
    }
    SCM_INTERNAL_MUTEX_SAFE_LOCK_END();

    g->value = value;
    Scm_GlocMark(g, kind);

    if (prev_kind != 0) {
        /* NB: Scm_EqualP may throw an error.  It won't leave the state
           inconsistent, but be aware. */
        if (prev_kind != kind || !Scm_EqualP(value, oldval)) {
            Scm_Warn("redefining %s %S::%S",
                     (prev_kind == SCM_BINDING_CONST)? "constant" : "inlinable",
                     g->module->name, g->name);
        }
    }
    return g;
}

/* Convenience wrapper (return value is ScmObj for the backward
   compatibility)*/
ScmObj Scm_Define(ScmModule *module, ScmSymbol *symbol, ScmObj value)
{
    return SCM_OBJ(Scm_MakeBinding(module, symbol, value, 0));
}

ScmObj Scm_DefineConst(ScmModule *module, ScmSymbol *symbol, ScmObj value)
{
    return SCM_OBJ(Scm_MakeBinding(module, symbol, value, SCM_BINDING_CONST));
}

/*
 * Injecting hidden binding
 *   This inserts a dummy binding with hidden==true so that
 *   the module effectively removes the binding of the given symbol
 *   inherited from parent.
 *   This is not for general use.  It is intended to be used for
 *   intermediate anonymous modules, created by import handling
 *   routine to implement :except and :rename qualifiers.
 *   Since we assume MODULE is for intermediate modules, we only
 *   insert bindings to the external table, for those modules are
 *   only searched in the 'import' path.
 */
void Scm_HideBinding(ScmModule *module, ScmSymbol *symbol)
{
    if (module->sealed) err_sealed(SCM_OBJ(symbol), module);

    int err_exists = FALSE;

    (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex);
    ScmObj v = Scm_HashTableRef(module->external, SCM_OBJ(symbol), SCM_FALSE);
    if (!SCM_FALSEP(v)) {
        err_exists = TRUE;
    } else {
        ScmGloc *g = SCM_GLOC(Scm_MakeGloc(symbol, module));
        g->hidden = TRUE;
        Scm_HashTableSet(module->external, SCM_OBJ(symbol), SCM_OBJ(g), 0);
    }
    (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex);

    /* Raise outside the lock so the mutex is not held during unwinding. */
    if (err_exists) {
        Scm_Error("hide-binding: binding already exists: %S (exports=%S)",
                  SCM_OBJ(symbol), Scm_ModuleExports(module));
    }
}

/*
 * Binding aliasing
 *   This is a special operation to realize :only and :rename import option.
 *   The name ORIGINNAME is looked up in the module ORIGIN to get a gloc.
 *   Then the gloc is directly inserted into the module TARGET under the name
 *   TARGETNAME.
 *   Since gloc is shared, subsequent changes in the binding are also shared.
 *
 *   If the original binding doesn't exist, or isn't exported, noop and
 *   FALSE is returned.  Otherwise TRUE is returned.
 *
 *   CAVEATS:
 *
 *   - gloc's module remains the same.
 *   - autoload won't be resolved.
 *   - TARGETNAME shouldn't be bound in TARGET beforehand.  We don't check
 *     it and just insert the gloc.  If there is an existing binding,
 *     it would become orphaned, possibly causing problems.
 *
 *   NB: This is the only operation that causes a gloc to be shared between
 *   more than one modules.  I'm not yet clear on the implication of such
 *   sharing in general, so this should be used with care.  At least it
 *   won't cause much trouble if the target module is an implicit anonymous
 *   module created by :only and :rename import options.
*/ int Scm_AliasBinding(ScmModule *target, ScmSymbol *targetName, ScmModule *origin, ScmSymbol *originName) { if (target->sealed) err_sealed(SCM_OBJ(targetName), target); ScmGloc *g = Scm_FindBinding(origin, originName, SCM_BINDING_EXTERNAL); if (g == NULL) return FALSE; SCM_INTERNAL_MUTEX_SAFE_LOCK_BEGIN(modules.mutex); Scm_HashTableSet(target->external, SCM_OBJ(targetName), SCM_OBJ(g), 0); Scm_HashTableSet(target->internal, SCM_OBJ(targetName), SCM_OBJ(g), 0); SCM_INTERNAL_MUTEX_SAFE_LOCK_END(); return TRUE; } /* * Import */ ScmObj Scm_ImportModule(ScmModule *module, ScmObj imported, ScmObj prefix, u_long flags SCM_UNUSED) /* reserved for future use */ { if (module->sealed) err_sealed(SCM_OBJ(imported), module); ScmModule *imp = NULL; if (SCM_MODULEP(imported)) { imp = SCM_MODULE(imported); } else if (SCM_SYMBOLP(imported)) { imp = Scm_FindModule(SCM_SYMBOL(imported), 0); } else if (SCM_IDENTIFIERP(imported)) { imp = Scm_FindModule(Scm_UnwrapIdentifier(SCM_IDENTIFIER(imported)), 0); } else { Scm_Error("module name or module required, but got %S", imported); } if (SCM_SYMBOLP(prefix)) { imp = SCM_MODULE(Scm__MakeWrapperModule(imp, prefix)); } /* Preallocate a pair, so that we won't call malloc during locking */ ScmObj p = Scm_Cons(SCM_OBJ(imp), SCM_NIL); /* Prepend imported module to module->imported list. */ (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex); { ScmObj ms, prev = p; SCM_SET_CDR_UNCHECKED(p, module->imported); /* Remove duplicate module, if any. NB: We allow to import the same module multiple times if they are qualified by :only, :prefix, etc. Theoretically we should check exactly same qualifications, but we hope that kind of duplication is rare. 
*/ SCM_FOR_EACH(ms, SCM_CDR(p)) { ScmModule *m = SCM_MODULE(SCM_CAR(ms)); if (!SCM_EQ(SCM_OBJ(m), SCM_OBJ(imp))) { prev = ms; continue; } SCM_SET_CDR_UNCHECKED(prev, SCM_CDR(ms)); break; } module->imported = p; } (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex); return module->imported; } /* Deprecated */ ScmObj Scm_ImportModules(ScmModule *module, ScmObj list) { ScmObj lp; SCM_FOR_EACH(lp, list) { Scm_ImportModule(module, SCM_CAR(lp), SCM_FALSE, 0); } return module->imported; } /* * Export */ /* <spec> :: <name> | (rename <name> <exported-name>) */ ScmObj Scm_ExportSymbols(ScmModule *module, ScmObj specs) { ScmObj lp; ScmObj overwritten = SCM_NIL; /* list of (exported-name orig-internal-name new-internal-name). */ /* Check input first */ SCM_FOR_EACH(lp, specs) { ScmObj spec = SCM_CAR(lp); if (!(SCM_SYMBOLP(spec) || (SCM_PAIRP(spec) && SCM_PAIRP(SCM_CDR(spec)) && SCM_PAIRP(SCM_CDDR(spec)) && SCM_NULLP(SCM_CDR(SCM_CDDR(spec))) && SCM_EQ(SCM_CAR(spec), SCM_SYM_RENAME) && SCM_SYMBOLP(SCM_CADR(spec)) && SCM_SYMBOLP(SCM_CAR(SCM_CDDR(spec)))))) { Scm_Error("Invalid export-spec; a symbol, or (rename <symbol> <symbol>) is expected, but got %S", spec); } } (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex); SCM_FOR_EACH(lp, specs) { ScmObj spec = SCM_CAR(lp); ScmSymbol *name, *exported_name; if (SCM_SYMBOLP(spec)) { name = exported_name = SCM_SYMBOL(spec); } else { /* we already knew those are symbols */ name = SCM_SYMBOL(SCM_CADR(spec)); exported_name = SCM_SYMBOL(SCM_CAR(SCM_CDDR(spec))); } ScmDictEntry *e = Scm_HashCoreSearch(SCM_HASH_TABLE_CORE(module->external), (intptr_t)exported_name, SCM_DICT_GET); if (e) { /* If we have e, it's already exported. Check if the previous export is for the same binding. */ SCM_ASSERT(SCM_DICT_VALUE(e) && SCM_GLOCP(SCM_DICT_VALUE(e))); ScmGloc *g = SCM_GLOC(SCM_DICT_VALUE(e)); if (!SCM_EQ(name, g->name)) { /* exported_name got a different meaning. we record it to warn later, then 'unexport' the old one. 
*/ overwritten = Scm_Cons(SCM_LIST3(SCM_OBJ(exported_name), SCM_OBJ(g->name), SCM_OBJ(name)), overwritten); Scm_HashCoreSearch(SCM_HASH_TABLE_CORE(module->external), (intptr_t)exported_name, SCM_DICT_DELETE); e = NULL; } } /* we check again, for the symbol may be unexported above. */ if (e == NULL) { /* This symbol hasn't been exported. Either it only has an internal binding, or there's no binding at all. In the latter case, we create a new binding (without value). */ e = Scm_HashCoreSearch(SCM_HASH_TABLE_CORE(module->internal), (intptr_t)name, SCM_DICT_CREATE); if (!e->value) { ScmGloc *g = SCM_GLOC(Scm_MakeGloc(name, module)); (void)SCM_DICT_SET_VALUE(e, SCM_OBJ(g)); } Scm_HashTableSet(module->external, SCM_OBJ(exported_name), SCM_DICT_VALUE(e), 0); } } (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex); /* Now, if this export changes the meaning of exported symbols, we warn it. We expect this only happens at the development time, when one is fiddling exports incrementally, so we just use Scm_Warn - a library ready to be used shouldn't cause this warning. */ if (!SCM_NULLP(overwritten)) { ScmObj lp; SCM_FOR_EACH(lp, overwritten) { ScmObj p = SCM_CAR(lp); Scm_Warn("Exporting %S from %S as %S overrides the previous export of %S", SCM_CAR(SCM_CDDR(p)), SCM_OBJ(module), SCM_CAR(p), SCM_CADR(p)); } } return SCM_UNDEFINED; /* we might want to return something more useful...*/ } ScmObj Scm_ExportAll(ScmModule *module) { (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex); if (!module->exportAll) { /* Mark the module 'export-all' so that the new bindings would get exported mark by default. */ module->exportAll = TRUE; /* Scan the module and mark all existing bindings as exported. 
*/ ScmHashIter iter; Scm_HashIterInit(&iter, SCM_HASH_TABLE_CORE(module->internal)); ScmDictEntry *e; while ((e = Scm_HashIterNext(&iter)) != NULL) { ScmDictEntry *ee; ee = Scm_HashCoreSearch(SCM_HASH_TABLE_CORE(module->external), e->key, SCM_DICT_CREATE); if (!ee->value) { (void)SCM_DICT_SET_VALUE(ee, SCM_DICT_VALUE(e)); } } } (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex); return SCM_OBJ(module); } /* Returns list of exported symbols. We assume this is infrequent operation, so we build the list every call. If it becomes a problem, we can cache the result. */ ScmObj Scm_ModuleExports(ScmModule *module) { ScmObj h = SCM_NIL, t = SCM_NIL; (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex); ScmHashIter iter; Scm_HashIterInit(&iter, SCM_HASH_TABLE_CORE(module->external)); ScmDictEntry *e; while ((e = Scm_HashIterNext(&iter)) != NULL) { SCM_APPEND1(h, t, SCM_DICT_KEY(e)); } (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex); return h; } /*---------------------------------------------------------------------- * Extending (inheriting) modules */ /* Module inheritance obeys the same rule as class inheritance, hence we use monotonic merge. */ /* NB: ExtendModule alters module's precedence list, and may cause unwanted side effects when used carelessly. 
*/ ScmObj Scm_ExtendModule(ScmModule *module, ScmObj supers) { if (module->sealed) { Scm_Error("Attempt to extend a sealed module: %S", SCM_OBJ(module)); } ScmObj seqh = SCM_NIL, seqt = SCM_NIL; ScmObj sp; SCM_FOR_EACH(sp, supers) { if (!SCM_MODULEP(SCM_CAR(sp))) { Scm_Error("non-module object found in the extend syntax: %S", SCM_CAR(sp)); } SCM_APPEND1(seqh, seqt, SCM_MODULE(SCM_CAR(sp))->mpl); } SCM_APPEND1(seqh, seqt, supers); module->parents = supers; ScmObj mpl = Scm_MonotonicMerge1(seqh); if (SCM_FALSEP(mpl)) { Scm_Error("can't extend those modules simultaneously because of inconsistent precedence lists: %S", supers); } module->mpl = Scm_Cons(SCM_OBJ(module), mpl); return module->mpl; } /*---------------------------------------------------------------------- * Module sealing */ /* NB: In general it is a bad idea to "unseal" module, so we only provide an API to make module sealed. However, unsealing might be useful for debugging. */ void Scm_ModuleSeal(ScmModule *module) { module->sealed = TRUE; } /*---------------------------------------------------------------------- * Finding modules */ ScmModule *Scm_FindModule(ScmSymbol *name, int flags) { if (flags & SCM_FIND_MODULE_CREATE) { int created; ScmModule *m = lookup_module_create(name, &created); SCM_ASSERT(m != NULL); /* If the module is ever called with CREATE and PLACEHOLDING flag, turn placeholding flag on. The flag is cleared if the module is ever called with CREATE but without PLACEHOLDING. 
*/ if (created && (flags & SCM_FIND_MODULE_PLACEHOLDING)) { m->placeholding = TRUE; } if (!(flags & SCM_FIND_MODULE_PLACEHOLDING)) { m->placeholding = FALSE; } return m; } else { ScmModule *m = lookup_module(name); if (m == NULL) { if (!(flags & SCM_FIND_MODULE_QUIET)) { Scm_Error("no such module: %S", name); } return NULL; } else { return m; } } } ScmObj Scm_AllModules(void) { ScmObj h = SCM_NIL, t = SCM_NIL; ScmHashIter iter; ScmDictEntry *e; (void)SCM_INTERNAL_MUTEX_LOCK(modules.mutex); Scm_HashIterInit(&iter, SCM_HASH_TABLE_CORE(modules.table)); while ((e = Scm_HashIterNext(&iter)) != NULL) { ScmModule *m = SCM_MODULE(SCM_DICT_VALUE(e)); if (!m->placeholding) { SCM_APPEND1(h, t, SCM_DICT_VALUE(e)); } } (void)SCM_INTERNAL_MUTEX_UNLOCK(modules.mutex); return h; } void Scm_SelectModule(ScmModule *mod) { SCM_ASSERT(SCM_MODULEP(mod)); Scm_VM()->module = mod; } /*---------------------------------------------------------------------- * Module and pathnames */ /* Convert module name and pathname (mod load-path) and vice versa. We moved the definition in Scheme. These are just stubs to call them. 
*/ ScmObj Scm_ModuleNameToPath(ScmSymbol *name) { static ScmObj module_name_to_path_proc = SCM_UNDEFINED; SCM_BIND_PROC(module_name_to_path_proc, "module-name->path", Scm_GaucheModule()); return Scm_ApplyRec1(module_name_to_path_proc, SCM_OBJ(name)); } ScmObj Scm_PathToModuleName(ScmString *path) { static ScmObj path_to_module_name_proc = SCM_UNDEFINED; SCM_BIND_PROC(path_to_module_name_proc, "path->module-name", Scm_GaucheModule()); return Scm_ApplyRec1(path_to_module_name_proc, SCM_OBJ(path)); } /*---------------------------------------------------------------------- * Predefined modules and initialization */ ScmModule *Scm_NullModule(void) { return &nullModule; } ScmModule *Scm_SchemeModule(void) { return &schemeModule; } ScmModule *Scm_GaucheModule(void) { return &gaucheModule; } ScmModule *Scm_GaucheInternalModule(void) { return &internalModule; } ScmModule *Scm_UserModule(void) { return &userModule; } ScmModule *Scm__KeywordModule(void) /* internal */ { return &keywordModule; } ScmModule *Scm__GaucheKeywordModule(void) /* internal */ { return &gkeywordModule; } ScmModule *Scm__RequireBaseModule(void) /* internal */ { return &reqbaseModule; } ScmModule *Scm_CurrentModule(void) { return Scm_VM()->module; } /* NB: we don't need to lock the global module table in initialization */ #define INIT_MOD(mod, mname, mpl, inttab) \ do { \ SCM_SET_CLASS(&mod, SCM_CLASS_MODULE); \ init_module(&mod, mname, inttab); \ Scm_HashTableSet(modules.table, (mod).name, SCM_OBJ(&mod), 0); \ mod.parents = (SCM_NULLP(mpl)? SCM_NIL : SCM_LIST1(SCM_CAR(mpl))); \ mpl = mod.mpl = Scm_Cons(SCM_OBJ(&mod), mpl); \ } while (0) void Scm__InitModule(void) { /* List of builtin modules. We create these so that 'use' or r7rs 'import' won't try to search the file. The modules listed here are marked "provided" at the startup, so it can no longer be loaded by 'use' or 'require'. Don't list modules that needs to be loaded. 
*/ static const char *builtin_modules[] = { "srfi-2", "srfi-6", "srfi-8", "srfi-10", "srfi-16", "srfi-17", "srfi-22", "srfi-23", "srfi-28", "srfi-34", "srfi-35", "srfi-36", "srfi-38", "srfi-45", "srfi-61", "srfi-62", "srfi-87", "srfi-95", "srfi-111", NULL }; const char **modname; (void)SCM_INTERNAL_MUTEX_INIT(modules.mutex); modules.table = SCM_HASH_TABLE(Scm_MakeHashTableSimple(SCM_HASH_EQ, 64)); /* standard module chain */ ScmObj mpl = SCM_NIL; INIT_MOD(nullModule, SCM_SYM_NULL, mpl, NULL); INIT_MOD(schemeModule, SCM_SYM_SCHEME, mpl, NULL); INIT_MOD(keywordModule, SCM_SYM_KEYWORD, mpl, NULL); INIT_MOD(gaucheModule, SCM_SYM_GAUCHE, mpl, NULL); INIT_MOD(gfModule, SCM_SYM_GAUCHE_GF, mpl, NULL); INIT_MOD(userModule, SCM_SYM_USER, mpl, NULL); mpl = SCM_CDR(mpl); /* default mpl doesn't include user module */ defaultParents = SCM_LIST1(SCM_CAR(mpl)); defaultMpl = mpl; /* other modules */ mpl = defaultMpl; INIT_MOD(internalModule, SCM_SYM_GAUCHE_INTERNAL, mpl, NULL); INIT_MOD(reqbaseModule, SCM_INTERN("gauche.require-base"), mpl, NULL); Scm_ModuleSeal(&reqbaseModule); mpl = keywordModule.mpl; INIT_MOD(gkeywordModule, SCM_INTERN("gauche.keyword"), mpl, keywordModule.internal); gkeywordModule.exportAll = TRUE; /* create predefined moudles */ for (modname = builtin_modules; *modname; modname++) { (void)SCM_FIND_MODULE(*modname, SCM_FIND_MODULE_CREATE); } }
121053.c
/*******************************************************************************
  SYS PORTS Static Functions for PORTS System Service

  Company:
    Microchip Technology Inc.

  File Name:
    plib_gpio.c

  Summary:
    GPIO function implementations for the GPIO PLIB.

  Description:
    The GPIO PLIB provides a simple interface to manage peripheral
    input-output controller.

*******************************************************************************/

//DOM-IGNORE-BEGIN
/*******************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*******************************************************************************/
//DOM-IGNORE-END

#include "plib_gpio.h"


/******************************************************************************
  Function:
    GPIO_Initialize ( void )

  Summary:
    Initialize the GPIO library.

  Remarks:
    See plib_gpio.h for more details.
*/
void GPIO_Initialize ( void )
{
    /* PORTA Initialization */
    ANSELACLR = 0x4001; /* Digital Mode Enable */
    /* PORTB Initialization */
    ANSELBCLR = 0x1; /* Digital Mode Enable */
    /* PORTC Initialization */
    /* PORTD Initialization */
    /* PORTE Initialization */
    /* PORTF Initialization */
    /* PORTG Initialization */

    /* unlock system for PPS configuration
       (SYSKEY magic sequence, then clear IOLOCK) */
    SYSKEY = 0x00000000;
    SYSKEY = 0xAA996655;
    SYSKEY = 0x556699AA;

    CFGCONbits.IOLOCK = 0;

    /* PPS Input Remapping */

    /* PPS Output Remapping */
    /* NOTE(review): several PPS output registers are written more than
       once below (RPB0R = 5 twice; RPA0R = 10 then 5; RPA14R = 6 then 5).
       Only the last write to each register takes effect.  This looks like
       a code-generator artifact from overlapping pin assignments — confirm
       the intended final mappings against the MHC pin configuration. */
    RPB0R = 5;
    RPA0R = 10;
    RPA14R = 6;
    RPA0R = 5;
    RPB0R = 5;
    RPA14R = 5;

    /* Lock back the system after PPS configuration */
    SYSKEY = 0x00000000;
    SYSKEY = 0xAA996655;
    SYSKEY = 0x556699AA;

    CFGCONbits.IOLOCK = 1;
}

// *****************************************************************************
// *****************************************************************************
// Section: GPIO APIs which operates on multiple pins of a port
// *****************************************************************************
// *****************************************************************************

// *****************************************************************************
/* Function:
    uint32_t GPIO_PortRead ( GPIO_PORT port )

  Summary:
    Read all the I/O lines of the selected port.

  Description:
    This function reads the live data values on all the I/O lines of the
    selected port.  Bit values returned in each position indicate corresponding
    pin levels.
    1 = Pin is high.
    0 = Pin is low.

    This function reads the value regardless of pin configuration, whether it
    is set as as an input, driven by the GPIO Controller, or driven by a
    peripheral.

  Remarks:
    If the port has less than 32-bits, unimplemented pins will read as
    low (0).
    Implemented pins are Right aligned in the 32-bit return value.
*/
uint32_t GPIO_PortRead(GPIO_PORT port)
{
    /* Port register banks are spaced 0x40 words apart; index off PORTA. */
    return (*(volatile uint32_t *)(&PORTA + (port * 0x40)));
}

// *****************************************************************************
/* Function:
    void GPIO_PortWrite (GPIO_PORT port, uint32_t mask, uint32_t value);

  Summary:
    Write the value on the masked I/O lines of the selected port.

  Remarks:
    See plib_gpio.h for more details.
*/
void GPIO_PortWrite(GPIO_PORT port, uint32_t mask, uint32_t value)
{
    /* Read-modify-write of the latch: only bits set in mask are changed. */
    *(volatile uint32_t *)(&LATA + (port * 0x40)) =
        (*(volatile uint32_t *)(&LATA + (port * 0x40)) & (~mask)) |
        (mask & value);
}

// *****************************************************************************
/* Function:
    uint32_t GPIO_PortLatchRead ( GPIO_PORT port )

  Summary:
    Read the latched value on all the I/O lines of the selected port.

  Remarks:
    See plib_gpio.h for more details.
*/
uint32_t GPIO_PortLatchRead(GPIO_PORT port)
{
    return (*(volatile uint32_t *)(&LATA + (port * 0x40)));
}

// *****************************************************************************
/* Function:
    void GPIO_PortSet ( GPIO_PORT port, uint32_t mask )

  Summary:
    Set the selected IO pins of a port.

  Remarks:
    See plib_gpio.h for more details.
*/
void GPIO_PortSet(GPIO_PORT port, uint32_t mask)
{
    /* Atomic set via the LATxSET register - no read-modify-write needed. */
    *(volatile uint32_t *)(&LATASET + (port * 0x40)) = mask;
}

// *****************************************************************************
/* Function:
    void GPIO_PortClear ( GPIO_PORT port, uint32_t mask )

  Summary:
    Clear the selected IO pins of a port.

  Remarks:
    See plib_gpio.h for more details.
*/
void GPIO_PortClear(GPIO_PORT port, uint32_t mask)
{
    /* Atomic clear via the LATxCLR register. */
    *(volatile uint32_t *)(&LATACLR + (port * 0x40)) = mask;
}

// *****************************************************************************
/* Function:
    void GPIO_PortToggle ( GPIO_PORT port, uint32_t mask )

  Summary:
    Toggles the selected IO pins of a port.

  Remarks:
    See plib_gpio.h for more details.
*/
void GPIO_PortToggle(GPIO_PORT port, uint32_t mask)
{
    /* Atomic toggle via the LATxINV register. */
    *(volatile uint32_t *)(&LATAINV + (port * 0x40))= mask;
}

// *****************************************************************************
/* Function:
    void GPIO_PortInputEnable ( GPIO_PORT port, uint32_t mask )

  Summary:
    Enables selected IO pins of a port as input.

  Remarks:
    See plib_gpio.h for more details.
*/
void GPIO_PortInputEnable(GPIO_PORT port, uint32_t mask)
{
    /* TRIS bit = 1 configures the pin as input. */
    *(volatile uint32_t *)(&TRISASET + (port * 0x40)) = mask;
}

// *****************************************************************************
/* Function:
    void GPIO_PortOutputEnable ( GPIO_PORT port, uint32_t mask )

  Summary:
    Enables selected IO pins of a port as output(s).

  Remarks:
    See plib_gpio.h for more details.
*/
void GPIO_PortOutputEnable(GPIO_PORT port, uint32_t mask)
{
    /* TRIS bit = 0 configures the pin as output. */
    *(volatile uint32_t *)(&TRISACLR + (port * 0x40)) = mask;
}


/*******************************************************************************
 End of File
*/
954930.c
/*
 * rawrm.c
 *
 * Copyright 2004 by Anthony Howe.  All rights reserved.
 *
 * usage: rawrm <file-list
 *
 * Reads one filename per line from standard input and unlinks each one.
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>     /* unlink(); was missing, leaving unlink implicitly declared */

static char line[BUFSIZ];

/**
 * Read one text line from fp into line (at most size-1 bytes plus NUL).
 *
 * A trailing LF or CRLF is stripped.  Returns the line length (>= 0),
 * or -1 on end-of-file with no data read, or on a read error.
 *
 * Fixes over the original:
 *  - fgetc()'s int result is tested against EOF before narrowing to char,
 *    so a 0xFF data byte is no longer mistaken for end-of-file;
 *  - a final line that lacks a trailing newline is returned instead of
 *    being silently discarded (the old code returned -1 and dropped it).
 */
long
TextInputLine(FILE *fp, char *line, long size)
{
	long i;

	for (i = 0, --size; i < size; ++i) {
		int ch = fgetc(fp);

		if (ch == EOF) {
			line[i] = '\0';
			/* Error, or end of input with nothing accumulated. */
			if (ferror(fp) || i == 0)
				return -1;
			/* Partial final line (no trailing newline): keep it. */
			return i;
		}

		line[i] = (char) ch;

		if (line[i] == '\n') {
			line[i] = '\0';
			if (0 < i && line[i-1] == '\r')
				line[--i] = '\0';
			break;
		}
	}

	/* Line filled the buffer without a newline; NUL-terminate what fits. */
	line[i] = '\0';

	return i;
}

int
main(int argc, char **argv)
{
	long length;

	/* No arguments expected: the file list arrives on stdin. */
	if (argc != 1) {
		fprintf(stderr, "usage: rawrm <file-list\n");
		return 2;
	}

	while (0 <= (length = TextInputLine(stdin, line, sizeof (line)))) {
		/* Report failures but keep going; message kept on stdout
		   for compatibility with the original behaviour. */
		if (length != 0 && unlink(line))
			fprintf(stdout, "%s not removed\n", line);
	}

	return 0;
}
361201.c
#include <stdlib.h> #include <stdint.h> #include <string.h> #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "esp_err.h" #include "esp_log.h" #include "mbedtls/aes.h" #include "mbedtls/cmac.h" #include "mbedtls/md5.h" #include "mbedtls/sha256.h" #include "error.h" #include "utils.h" static const char *TAG = "util"; int random_int(int min, int max) { return min + (int) random() % (max + 1 - min); } uint16_t clamp(uint16_t value, uint16_t min, uint16_t max) { if (value < min) return min; if (value > max) return max; return value; } size_t round_up(size_t len, uint16_t block_size) { return ((len + block_size - 1) & (-block_size)); } static esp_err_t aes128(int mode, const uint8_t *src, size_t src_len, uint8_t *dst, size_t dst_len, const uint8_t key[AES128_BLOCK_SIZE]) { ARG_CHECK(src != NULL, ERR_PARAM_NULL); ARG_CHECK(dst != NULL, ERR_PARAM_NULL); ARG_CHECK(key != NULL, ERR_PARAM_NULL); ARG_CHECK(src_len > 0, ERR_PARAM_LE_ZERO); ARG_CHECK(dst_len >= src_len, "dst_len must be >= src_len"); if (mode == MBEDTLS_AES_DECRYPT) { ARG_CHECK(src_len % AES128_BLOCK_SIZE == 0, "src_len: %d must be a multiple of %d", src_len, AES128_BLOCK_SIZE); } else { ARG_CHECK(dst_len % AES128_BLOCK_SIZE == 0, "dst_len: %d must be a multiple of %d", dst_len, AES128_BLOCK_SIZE); } ESP_LOGD(TAG, "mode: %s", mode == MBEDTLS_AES_DECRYPT ? 
"DECRYPT" : "ENCRYPT"); ESP_LOGD(TAG, "src_len: %d", src_len); ESP_LOGD(TAG, "dst_len: %d", dst_len); mbedtls_aes_context ctx; mbedtls_aes_init(&ctx); int err = mbedtls_aes_setkey_enc(&ctx, key, AES128_BLOCK_SIZE * 8); if (err != 0) { ESP_LOGW(TAG, "mbedtls_aes_setkey_enc, error: %d", err); goto fail; } ssize_t len = src_len; uint8_t tmp[AES128_BLOCK_SIZE]; while (len > 0) { err = mbedtls_aes_crypt_ecb(&ctx, mode, src, dst); if (err != 0) { ESP_LOGW(TAG, "mbedtls_aes_crypt_ecb, error: %d", err); goto fail; } len -= AES128_BLOCK_SIZE; src += AES128_BLOCK_SIZE; dst += AES128_BLOCK_SIZE; if (mode == MBEDTLS_AES_ENCRYPT && len > 0 && len < AES128_BLOCK_SIZE) { for (int i = 0; i < len; ++i) { tmp[i] = src[i]; } ESP_ERROR_CHECK(pkcs_7_add_padding(tmp, (size_t *) &len, AES128_BLOCK_SIZE)); src = tmp; } } mbedtls_aes_free(&ctx); return ESP_OK; fail: mbedtls_aes_free(&ctx); return ESP_FAIL; } esp_err_t aes128_decrypt(const uint8_t *src, size_t src_len, uint8_t *dst, size_t dst_len, const uint8_t key[AES128_BLOCK_SIZE]) { return aes128(MBEDTLS_AES_DECRYPT, src, src_len, dst, dst_len, key); } esp_err_t aes128_encrypt(const uint8_t *src, size_t src_len, uint8_t *dst, size_t dst_len, const uint8_t key[AES128_BLOCK_SIZE]) { return aes128(MBEDTLS_AES_ENCRYPT, src, src_len, dst, dst_len, key); } esp_err_t pkcs_7_strip_padding(uint8_t *buf, size_t *len) { ARG_CHECK(buf != NULL, ERR_PARAM_NULL); ARG_CHECK(*len > 0, ERR_PARAM_LE_ZERO); const size_t max = *len; if (max < 16) { return ESP_OK; } const uint8_t last = buf[max - 1]; ESP_LOGD(TAG, "Checking for PKCS #7 padding. 
[%d] -> %d", max - 1, last); if (last == 0 || last >= 16) { return ESP_OK; } for (size_t i = max - last; i < max - 1; i++) { if (buf[i] != last) { ESP_LOGV(TAG, "[%d] -> %d != %d", i, buf[i], last); return ESP_OK; } } ESP_LOGD(TAG, "Stripping from %d -> %d due to PKCS #7 padding", max - last, max - 1); memset(&buf[max - last], 0, last); *len -= last; return ESP_OK; } esp_err_t pkcs_7_add_padding(uint8_t *buf, size_t *len, size_t max_size) { ARG_CHECK(buf != NULL, ERR_PARAM_NULL); ARG_CHECK(*len > 0, ERR_PARAM_LE_ZERO); ARG_CHECK(max_size >= round_up(*len, AES128_BLOCK_SIZE), "max_size < round_up(*len, AES128_BLOCK_SIZE)"); size_t padding = *len % AES128_BLOCK_SIZE; if (padding == 0) { ESP_LOGD(TAG, "No padding necessary"); return ESP_OK; } ESP_LOGD(TAG, "Adding %d bytes of padding", padding); buf += *len; for (int i = 0; i < padding; ++i) { *buf++ = padding; } *len += padding; return ESP_OK; } esp_err_t md5(const uint8_t *buf, size_t len, uint8_t output[16]) { ARG_CHECK(buf != NULL, ERR_PARAM_NULL); ARG_CHECK(len > 0, ERR_PARAM_LE_ZERO); ARG_CHECK(output != NULL, ERR_PARAM_NULL); int err = mbedtls_md5_ret(buf, len, output); if (err == 0) { return ESP_OK; } ESP_LOGW(TAG, "mbedtls_md5_ret, error: %d", err); return ESP_FAIL; } esp_err_t sha256(const uint8_t *buf, const size_t len, unsigned char output[32]) { ARG_CHECK(buf != NULL, ERR_PARAM_NULL); ARG_CHECK(len > 0, ERR_PARAM_LE_ZERO); ARG_CHECK(output != NULL, ERR_PARAM_NULL); output[0] = '\0'; int err = mbedtls_sha256_ret(buf, len, output, 0 /* SHA-256 */); if (err == 0) { return ESP_OK; } ESP_LOGW(TAG, "mbedtls_sha256_ret, error: %d", err); return ESP_FAIL; } const char *enum_from_value(const ProtobufCEnumDescriptor *descriptor, int value) { ARG_ERROR_CHECK(descriptor != NULL, ERR_PARAM_NULL); for (int i = 0; i < descriptor->n_values; ++i) { if (descriptor->values[i].value == value) { return descriptor->values[i].name; } } return "???"; } uint32_t enum_max(const ProtobufCEnumDescriptor *descriptor) { 
ARG_ERROR_CHECK(descriptor != NULL, ERR_PARAM_NULL); return descriptor->n_values; } void safe_delay_us(uint32_t delay_us) { if (delay_us * 1000 < portTICK_PERIOD_MS) { ets_delay_us(delay_us); return; } safe_delay_ms(delay_us / 1000); } void safe_delay_ms(uint32_t delay_ms) { if (delay_ms < portTICK_PERIOD_MS) { ets_delay_us(delay_ms * 1000); return; } vTaskDelay(pdMS_TO_TICKS(delay_ms)); } double lin_regression(const double coeff[], size_t coeff_size, double value) { double ret = 0; double x = 1; for (int i = 0; i < coeff_size; ++i) { ret += coeff[coeff_size - 1 - i] * x; if (i == 0) { x = value; } else { x *= value; } } return ret; }
148999.c
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "base.h"

struct devres_node {
	struct list_head	entry;
	dr_release_t		release;
#ifdef CONFIG_DEBUG_DEVRES
	const char		*name;
	size_t			size;
#endif
};

struct devres {
	struct devres_node	node;
	/* -- 3 pointers */
	unsigned long long	data[];	/* guarantee ull alignment */
};

struct devres_group {
	struct devres_node	node[2];
	void			*id;
	int			color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

/* Map a devres_node back to its group, or NULL if it is a plain devres.
 * The release callback doubles as the marker type (open vs close). */
static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp)
{
	size_t tot_size = sizeof(struct devres) + size;
	struct devres *dr;

	dr = kmalloc_track_caller(tot_size, gfp);
	if (unlikely(!dr))
		return NULL;

	/* Zero only the node header here; the data area is zeroed by callers
	 * that pass __GFP_ZERO (devres_alloc does, devm_kmalloc does not). */
	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
		      const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc);
#else
/**
 * devres_alloc - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc);
#endif

/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 * 	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			dr_match_t match, void *match_data,
			void (*fn)(struct device *, void *, void *),
			void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	/* Walk newest-first; _safe so @fn may remove the current entry. */
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		/* Must not still be registered on a device. */
		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

/* Find the most recently added devres matching @release (and @match, if
 * given).  Caller must hold dev->devres_lock. */
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match return 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_dr = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	/* devres_free(NULL) is a no-op when @new_res was consumed above. */
	devres_free(new_dr);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);

/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			/* keep 'first' pointing at a live entry as nodes
			 * are moved off the list */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end).  That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev,
			 struct list_head *first, struct list_head *end,
			 unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	/* release_nodes() drops the lock. */
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest.
 */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);

/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* kfree(NULL) is a no-op when the group was not found. */
	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		/* release_nodes() drops the lock on this path. */
		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

/*
 * Custom devres actions allow inserting a simple function call
 * into the teadown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp);
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);

/**
 * devm_kstrdup - Allocate resource managed space and
 *                copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	/* First pass sizes the output; the copy lets @ap be reused below. */
	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);

/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, void *p)
{
	int rc;

	rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate region of a memory using resource managed kmalloc
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
346547.c
// RUN: %llvmgcc %s -S -o - | grep llvm.global_ctors

/* Regression test: a function carrying __attribute__((constructor)) must be
 * registered in the llvm.global_ctors list so it runs before main(). */

void bar(void); /* explicit prototype; implicit declarations are invalid C99 */

void foo(void) __attribute__((constructor));
void foo(void) { bar(); }
288732.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE134_Uncontrolled_Format_String__char_connect_socket_w32_vsnprintf_16.c
Label Definition File: CWE134_Uncontrolled_Format_String.vasinks.label.xml
Template File: sources-vasinks-16.tmpl.c
*/
/*
 * @description
 * CWE: 134 Uncontrolled Format String
 * BadSource: connect_socket Read data using a connect socket (client side)
 * GoodSource: Copy a fixed string into data
 * Sinks: w32_vsnprintf
 *    GoodSink: vsnprintf with a format string
 *    BadSink : vsnprintf without a format string
 * Flow Variant: 16 Control flow: while(1)
 *
 * NOTE: This is an intentionally vulnerable Juliet test-suite file.  The
 * "POTENTIAL FLAW" sites below are the deliberate defects that analysis
 * tools are expected to detect; they must NOT be "fixed".
 */

#include <stdarg.h>

#include "std_testcase.h"

#ifndef _WIN32
#include <wchar.h>
#endif

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#include <direct.h>
#pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
#define CLOSE_SOCKET closesocket
#else /* NOT _WIN32 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define CLOSE_SOCKET close
#define SOCKET int
#endif

#define TCP_PORT 27015
#define IP_ADDRESS "127.0.0.1"

#ifndef OMITBAD

/* Bad sink: the tainted string is passed as the FORMAT argument. */
static void badVaSinkB(char * data, ...)
{
    {
        char dest[100] = "";
        va_list args;
        va_start(args, data);
        /* POTENTIAL FLAW: Do not specify the format allowing a possible format string vulnerability */
        vsnprintf(dest, 100-1, data, args);
        va_end(args);
        printLine(dest);
    }
}

/* Bad flow: fill 'data' from an attacker-controlled socket, then feed it
 * to the bad sink as the format string. */
void CWE134_Uncontrolled_Format_String__char_connect_socket_w32_vsnprintf_16_bad()
{
    char * data;
    char dataBuffer[100] = "";
    data = dataBuffer;
    while(1)
    {
        {
#ifdef _WIN32
            WSADATA wsaData;
            int wsaDataInit = 0;
#endif
            int recvResult;
            struct sockaddr_in service;
            char *replace;
            SOCKET connectSocket = INVALID_SOCKET;
            size_t dataLen = strlen(data);
            do
            {
#ifdef _WIN32
                if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR)
                {
                    break;
                }
                wsaDataInit = 1;
#endif
                /* POTENTIAL FLAW: Read data using a connect socket */
                connectSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
                if (connectSocket == INVALID_SOCKET)
                {
                    break;
                }
                memset(&service, 0, sizeof(service));
                service.sin_family = AF_INET;
                service.sin_addr.s_addr = inet_addr(IP_ADDRESS);
                service.sin_port = htons(TCP_PORT);
                if (connect(connectSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR)
                {
                    break;
                }
                /* Abort on error or the connection was closed, make sure to recv one
                 * less char than is in the recv_buf in order to append a terminator */
                recvResult = recv(connectSocket, (char *)(data + dataLen), sizeof(char) * (100 - dataLen - 1), 0);
                if (recvResult == SOCKET_ERROR || recvResult == 0)
                {
                    break;
                }
                /* Append null terminator */
                data[dataLen + recvResult / sizeof(char)] = '\0';
                /* Eliminate CRLF */
                replace = strchr(data, '\r');
                if (replace)
                {
                    *replace = '\0';
                }
                replace = strchr(data, '\n');
                if (replace)
                {
                    *replace = '\0';
                }
            } while (0);
            if (connectSocket != INVALID_SOCKET)
            {
                CLOSE_SOCKET(connectSocket);
            }
#ifdef _WIN32
            if (wsaDataInit)
            {
                WSACleanup();
            }
#endif
        }
        break;
    }
    while(1)
    {
        badVaSinkB(data, data);
        break;
    }
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* Good sink: the tainted string is only a "%s" argument, never the format. */
static void goodB2GVaSinkG(char * data, ...)
{
    {
        char dest[100] = "";
        va_list args;
        va_start(args, data);
        /* FIX: Specify the format disallowing a format string vulnerability */
        vsnprintf(dest, 100-1, "%s", args);
        va_end(args);
        printLine(dest);
    }
}

/* goodB2G() - use badsource and goodsink by changing the sinks in the second while statement */
static void goodB2G()
{
    char * data;
    char dataBuffer[100] = "";
    data = dataBuffer;
    while(1)
    {
        {
#ifdef _WIN32
            WSADATA wsaData;
            int wsaDataInit = 0;
#endif
            int recvResult;
            struct sockaddr_in service;
            char *replace;
            SOCKET connectSocket = INVALID_SOCKET;
            size_t dataLen = strlen(data);
            do
            {
#ifdef _WIN32
                if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR)
                {
                    break;
                }
                wsaDataInit = 1;
#endif
                /* POTENTIAL FLAW: Read data using a connect socket */
                connectSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
                if (connectSocket == INVALID_SOCKET)
                {
                    break;
                }
                memset(&service, 0, sizeof(service));
                service.sin_family = AF_INET;
                service.sin_addr.s_addr = inet_addr(IP_ADDRESS);
                service.sin_port = htons(TCP_PORT);
                if (connect(connectSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR)
                {
                    break;
                }
                /* Abort on error or the connection was closed, make sure to recv one
                 * less char than is in the recv_buf in order to append a terminator */
                recvResult = recv(connectSocket, (char *)(data + dataLen), sizeof(char) * (100 - dataLen - 1), 0);
                if (recvResult == SOCKET_ERROR || recvResult == 0)
                {
                    break;
                }
                /* Append null terminator */
                data[dataLen + recvResult / sizeof(char)] = '\0';
                /* Eliminate CRLF */
                replace = strchr(data, '\r');
                if (replace)
                {
                    *replace = '\0';
                }
                replace = strchr(data, '\n');
                if (replace)
                {
                    *replace = '\0';
                }
            } while (0);
            if (connectSocket != INVALID_SOCKET)
            {
                CLOSE_SOCKET(connectSocket);
            }
#ifdef _WIN32
            if (wsaDataInit)
            {
                WSACleanup();
            }
#endif
        }
        break;
    }
    while(1)
    {
        goodB2GVaSinkG(data, data);
        break;
    }
}

/* Bad sink again (format = tainted-looking argument), paired with a safe source. */
static void goodG2BVaSinkB(char * data, ...)
{
    {
        char dest[100] = "";
        va_list args;
        va_start(args, data);
        /* POTENTIAL FLAW: Do not specify the format allowing a possible format string vulnerability */
        vsnprintf(dest, 100-1, data, args);
        va_end(args);
        printLine(dest);
    }
}

/* goodG2B() - use goodsource and badsink by changing the sources in the first while statement */
static void goodG2B()
{
    char * data;
    char dataBuffer[100] = "";
    data = dataBuffer;
    while(1)
    {
        /* FIX: Use a fixed string that does not contain a format specifier */
        strcpy(data, "fixedstringtest");
        break;
    }
    while(1)
    {
        goodG2BVaSinkB(data, data);
        break;
    }
}

void CWE134_Uncontrolled_Format_String__char_connect_socket_w32_vsnprintf_16_good()
{
    goodG2B();
    goodB2G();
}

#endif /* OMITGOOD */

/* Below is the main(). It is only used when building this testcase on
   its own for testing or for building a binary to use in testing binary
   analysis tools. It is not used when compiling all the testcases as one
   application, which is how source code analysis tools are tested. */
#ifdef INCLUDEMAIN

int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE134_Uncontrolled_Format_String__char_connect_socket_w32_vsnprintf_16_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE134_Uncontrolled_Format_String__char_connect_socket_w32_vsnprintf_16_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
587039.c
/* Copyright (C) 1991-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sysdep.h> #include "exit.h" #include "set-hooks.h" DEFINE_HOOK (__libc_atexit, (void)) /* Call all functions registered with `atexit' and `on_exit', in the reverse of the order in which they were registered perform stdio cleanup, and terminate program execution with STATUS. */ void attribute_hidden __run_exit_handlers (int status, struct exit_function_list **listp, bool run_list_atexit) { /* First, call the TLS destructors. */ #ifndef SHARED if (&__call_tls_dtors != NULL) #endif __call_tls_dtors (); /* We do it this way to handle recursive calls to exit () made by the functions registered with `atexit' and `on_exit'. We call everyone on the list and use the status value in the last exit (). 
*/ while (*listp != NULL) { struct exit_function_list *cur = *listp; while (cur->idx > 0) { const struct exit_function *const f = &cur->fns[--cur->idx]; switch (f->flavor) { void (*atfct) (void); void (*onfct) (int status, void *arg); void (*cxafct) (void *arg, int status); case ef_free: case ef_us: break; case ef_on: onfct = f->func.on.fn; #ifdef PTR_DEMANGLE PTR_DEMANGLE (onfct); #endif onfct (status, f->func.on.arg); break; case ef_at: atfct = f->func.at; #ifdef PTR_DEMANGLE PTR_DEMANGLE (atfct); #endif atfct (); break; case ef_cxa: cxafct = f->func.cxa.fn; #ifdef PTR_DEMANGLE PTR_DEMANGLE (cxafct); #endif cxafct (f->func.cxa.arg, status); break; } } *listp = cur->next; if (*listp != NULL) /* Don't free the last element in the chain, this is the statically allocate element. */ free (cur); } if (run_list_atexit) RUN_HOOK (__libc_atexit, ()); _exit (status); } void exit (int status) { __run_exit_handlers (status, &__exit_funcs, true); } libc_hidden_def (exit)
812441.c
//* bios_adc.c *
// ADC driver for AVR that performs conversions inside ADC noise-reduction
// sleep mode: sleeping starts the conversion, and the ADC-complete interrupt
// wakes the CPU and stores the result.

#include "bios_adc_sleep.h"
#include <avr/io.h>
#include <avr/interrupt.h>
#include <avr/sleep.h>

/* Latest conversion result, written only by ISR(ADC_vect).
 * Left-adjusted (ADLAR=1), so the 10-bit value occupies the top bits. */
static volatile uint16_t adc_result = 0;

/* Configure the ADC for sleep-triggered conversions.
 * NOTE(review): "wslepp" looks like a typo for "wsleep", but the name is
 * public API declared in bios_adc_sleep.h — left unchanged. */
void adc_init_wslepp (void)
{
    ADCSRA |= (1 << ADPS2) | (1 << ADPS1) | (1 << ADPS0) | (1<<ADIE); // Set ADC clock to 125kHz (==16MHz/128), enable ADC complete interrupt
    ADMUX = 0; // for now let us use the very 1st channel
    ADMUX |= (1 << REFS0); // use VCC as Vref (==AVCC==5V)
    ADMUX |= (1 << ADLAR); // left justify the result so that 8bits can be read from the high register
                           // this works well if you do not need to have a 4bit cushion to prevent overflow on computations
    set_sleep_mode(SLEEP_MODE_ADC); // or MCUCR |= (1 << SM0);
    ADCSRA |= (1 << ADEN); // start ADC
}

/* Disable the ADC and reset channel/reference selection. */
void adc_shutdown (void)
{
    ADCSRA = 0;
    ADMUX = 0;
}

/* Select the input channel; keeps REFS0/ADLAR bits (top 3 bits) intact. */
void adc_select_ch (uint8_t channel)
{
    ADMUX = ( ADMUX & 0xE0 ) | ( 0x1F & channel );
}

/* One conversion, 8-bit result: top 8 bits of the left-adjusted sample.
 * Entering ADC sleep mode starts the conversion; the ISR wakes us. */
uint8_t adc_get8b (void)
{
    // adc_result = 0; needed only to verify if sleep and interrupt really works
    sleep_enable(); // or MCUCR |= (1 << SE);
    // conversion starts automatically when sleep mode is activated below
    sleep_cpu(); // or __asm volatile ("sleep");
    sleep_disable(); // or MCUCR &= ~(1<<SE);
    return( ( adc_result>>8 ) & 0xFF );
}

/* One conversion, raw left-adjusted 16-bit register value
 * (the 10-bit sample sits in the upper bits because ADLAR=1). */
uint16_t adc_get10b (void)
{
    // adc_result = 0; needed only to verify if sleep and interrupt really works
    sleep_enable(); // or MCUCR |= (1 << SE);
    // conversion starts automatically when sleep mode is activated below
    sleep_cpu(); // or __asm volatile ("sleep");
    sleep_disable(); // or MCUCR &= ~(1<<SE);
    return( adc_result );
}

/* Oversampled ~12-bit result: average of 16 samples, returned left-shifted
 * to match the driver's left-adjusted convention. */
uint16_t adc_get12b (void)
{
    uint16_t sum = 0;
    // Note: resolution increase works only when noise at level of the last ADC bit is present!
    for (uint8_t i=0; i<16; ++i) // one needs to use 2^n iterations to increase the accuracy by n-bits,
        sum += adc_get10b() >> 6; // let's add up right aligned results to allow
    sum = sum << 2; // ( sum /16 ) << 6 results in ( sum >> 4 ) << 6 which is ( sum << 2 )
    return( sum ); // return the average - n=4 yields sum/16, the last 2bits may still contain noise though
}

/* ADC conversion-complete interrupt: capture the sample and clear the
 * sleep-enable bit so a spurious wakeup cannot immediately re-sleep.
 * NOTE(review): SE lives in MCUCR on some AVRs but in SMCR on others —
 * confirm this matches the target MCU. */
ISR(ADC_vect)
{
    adc_result = ADC;
    MCUCR &= ~(1<<SE); // clear enable sleep
}
200836.c
#include "first.h" #include "response.h" #include "request.h" #include "base.h" #include "fdevent.h" #include "http_header.h" #include "http_kv.h" #include "log.h" #include "stat_cache.h" #include "chunk.h" #include "http_chunk.h" #include "http_date.h" #include "http_range.h" #include "plugin.h" #include <sys/types.h> #include <sys/stat.h> #include "sys-time.h" #include <limits.h> #include <errno.h> #include <stdlib.h> #include <string.h> int http_response_omit_header (request_st * const r, const data_string * const ds) { const size_t klen = buffer_string_length(&ds->key); if (klen == sizeof("X-Sendfile")-1 && buffer_eq_icase_ssn(ds->key.ptr, CONST_STR_LEN("X-Sendfile"))) return 1; if (klen >= sizeof("X-LIGHTTPD-")-1 && buffer_eq_icase_ssn(ds->key.ptr, CONST_STR_LEN("X-LIGHTTPD-"))) { if (klen == sizeof("X-LIGHTTPD-KBytes-per-second")-1 && buffer_eq_icase_ssn(ds->key.ptr+sizeof("X-LIGHTTPD-")-1, CONST_STR_LEN("KBytes-per-second"))) { /* "X-LIGHTTPD-KBytes-per-second" */ off_t limit = strtol(ds->value.ptr, NULL, 10) << 10; /*(*=1024)*/ if (limit > 0 && (limit < r->conf.bytes_per_second || 0 == r->conf.bytes_per_second)) { r->conf.bytes_per_second = limit; } } return 1; } return 0; } __attribute_cold__ static void http_response_write_header_partial_1xx (request_st * const r, buffer * const b) { /* take data in con->write_queue and move into b * (to be sent prior to final response headers in r->write_queue) */ connection * const con = r->con; /*assert(&r->write_queue != con->write_queue);*/ chunkqueue * const cq = con->write_queue; con->write_queue = &r->write_queue; /*assert(0 == buffer_string_length(b));*//*expect empty buffer from caller*/ uint32_t len = (uint32_t)chunkqueue_length(cq); /*(expecting MEM_CHUNK(s), so not expecting error reading files)*/ if (chunkqueue_read_data(cq, buffer_string_prepare_append(b, len), len, r->conf.errh) < 0) len = 0; buffer_string_set_length(b, len);/*expect initial empty buffer from caller*/ chunkqueue_free(cq); } void 
http_response_write_header (request_st * const r) { chunkqueue * const cq = &r->write_queue; buffer * const b = chunkqueue_prepend_buffer_open(cq); if (cq != r->con->write_queue) http_response_write_header_partial_1xx(r, b); const char * const httpv = (r->http_version == HTTP_VERSION_1_1) ? "HTTP/1.1 " : "HTTP/1.0 "; buffer_append_string_len(b, httpv, sizeof("HTTP/1.1 ")-1); http_status_append(b, r->http_status); /* disable keep-alive if requested */ if (r->con->request_count > r->conf.max_keep_alive_requests || 0 == r->conf.max_keep_alive_idle) { r->keep_alive = 0; } else if (0 != r->reqbody_length && r->reqbody_length != r->reqbody_queue.bytes_in && (NULL == r->handler_module || 0 == (r->conf.stream_request_body & (FDEVENT_STREAM_REQUEST | FDEVENT_STREAM_REQUEST_BUFMIN)))) { r->keep_alive = 0; } else { r->con->keep_alive_idle = r->conf.max_keep_alive_idle; } if (light_btst(r->resp_htags, HTTP_HEADER_UPGRADE) && r->http_version == HTTP_VERSION_1_1) { http_header_response_set(r, HTTP_HEADER_CONNECTION, CONST_STR_LEN("Connection"), CONST_STR_LEN("upgrade")); } else if (r->keep_alive <= 0) { http_header_response_set(r, HTTP_HEADER_CONNECTION, CONST_STR_LEN("Connection"), CONST_STR_LEN("close")); } else if (r->http_version == HTTP_VERSION_1_0) {/*(&& r->keep_alive > 0)*/ http_header_response_set(r, HTTP_HEADER_CONNECTION, CONST_STR_LEN("Connection"), CONST_STR_LEN("keep-alive")); } if (304 == r->http_status && light_btst(r->resp_htags, HTTP_HEADER_CONTENT_ENCODING)) { http_header_response_unset(r, HTTP_HEADER_CONTENT_ENCODING, CONST_STR_LEN("Content-Encoding")); } /* add all headers */ for (size_t i = 0; i < r->resp_headers.used; ++i) { const data_string * const ds = (data_string *)r->resp_headers.data[i]; const uint32_t klen = buffer_string_length(&ds->key); const uint32_t vlen = buffer_string_length(&ds->value); if (0 == klen || 0 == vlen) continue; if ((ds->key.ptr[0] & 0xdf) == 'X' && http_response_omit_header(r, ds)) continue; char * restrict s = buffer_extend(b, 
klen+vlen+4); s[0] = '\r'; s[1] = '\n'; memcpy(s+2, ds->key.ptr, klen); s += 2+klen; s[0] = ':'; s[1] = ' '; memcpy(s+2, ds->value.ptr, vlen); } if (!light_btst(r->resp_htags, HTTP_HEADER_DATE)) { /* HTTP/1.1 and later requires a Date: header */ /* "\r\nDate: " 8-chars + 30-chars "%a, %d %b %Y %T GMT" + '\0' */ static time_t tlast = 0; static char tstr[40] = "\r\nDate: "; /* cache the generated timestamp */ const time_t cur_ts = log_epoch_secs; if (__builtin_expect ( (tlast != cur_ts), 0)) http_date_time_to_str(tstr+8, sizeof(tstr)-8, (tlast = cur_ts)); buffer_append_string_len(b, tstr, 37); } if (!light_btst(r->resp_htags, HTTP_HEADER_SERVER) && !buffer_string_is_empty(r->conf.server_tag)) buffer_append_str2(b, CONST_STR_LEN("\r\nServer: "), CONST_BUF_LEN(r->conf.server_tag)); buffer_append_string_len(b, CONST_STR_LEN("\r\n\r\n")); r->resp_header_len = buffer_string_length(b); if (r->conf.log_response_header) { log_error(r->conf.errh,__FILE__,__LINE__,"Response-Header:\n%s",b->ptr); } chunkqueue_prepend_buffer_commit(cq); /*(optimization to use fewer syscalls to send a small response)*/ off_t cqlen; if (r->resp_body_finished && light_btst(r->resp_htags, HTTP_HEADER_CONTENT_LENGTH) && (cqlen = chunkqueue_length(cq) - r->resp_header_len) > 0 && cqlen <= 32768) chunkqueue_small_resp_optim(cq); } __attribute_cold__ static handler_t http_response_physical_path_error (request_st * const r, const int code, const char * const msg) { r->http_status = code; if ((code == 404 && r->conf.log_file_not_found) || r->conf.log_request_handling) { if (NULL == msg) log_perror(r->conf.errh, __FILE__, __LINE__, "-- "); else log_error(r->conf.errh, __FILE__, __LINE__, "%s", msg); log_error(r->conf.errh, __FILE__, __LINE__, "Path : %s", r->physical.path.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "URI : %s", r->uri.path.ptr); } return HANDLER_FINISHED; } static handler_t http_response_physical_path_check(request_st * const r) { stat_cache_entry *sce = 
stat_cache_get_entry(&r->physical.path); if (__builtin_expect( (sce != NULL), 1)) { /* file exists */ } else { switch (errno) { case ENOTDIR: /* PATH_INFO ! :) */ break; case EACCES: return http_response_physical_path_error(r, 403, NULL); case ENAMETOOLONG: /* file name to be read was too long. return 404 */ case ENOENT: if (r->http_method == HTTP_METHOD_OPTIONS && light_btst(r->resp_htags, HTTP_HEADER_ALLOW)) { r->http_status = 200; return HANDLER_FINISHED; } return http_response_physical_path_error(r, 404, NULL); default: /* we have no idea what happened. let's tell the user so. */ return http_response_physical_path_error(r, 500, NULL); } /* not found, perhaps PATHINFO */ char *pathinfo; { /*(might check at startup that s->document_root does not end in '/')*/ size_t len = buffer_string_length(&r->physical.basedir) - (buffer_has_pathsep_suffix(&r->physical.basedir)); pathinfo = r->physical.path.ptr + len; if ('/' != *pathinfo) { pathinfo = NULL; } else if (pathinfo == r->physical.path.ptr) { /*(basedir is "/")*/ pathinfo = strchr(pathinfo+1, '/'); } } const uint32_t pathused = r->physical.path.used; for (char *pprev = pathinfo; pathinfo; pprev = pathinfo, pathinfo = strchr(pathinfo+1, '/')) { /*(temporarily modify r->physical.path in-place)*/ r->physical.path.used = pathinfo - r->physical.path.ptr + 1; *pathinfo = '\0'; stat_cache_entry * const nsce = stat_cache_get_entry(&r->physical.path); *pathinfo = '/'; r->physical.path.used = pathused; if (NULL == nsce) { pathinfo = pathinfo != pprev ? 
pprev : NULL; break; } sce = nsce; if (!S_ISDIR(sce->st.st_mode)) break; } if (NULL == pathinfo || !S_ISREG(sce->st.st_mode)) { /* no it really doesn't exists */ return http_response_physical_path_error(r, 404, "-- file not found"); } /* note: historical behavior checks S_ISREG() above, permitting * path-info only on regular files, not dirs or special files */ /* we have a PATHINFO */ if (pathinfo) { size_t len = r->physical.path.ptr+pathused-1-pathinfo, reqlen; if (r->conf.force_lowercase_filenames && len <= (reqlen = buffer_string_length(&r->target)) && buffer_eq_icase_ssn(r->target.ptr + reqlen - len, pathinfo, len)) { /* attempt to preserve case-insensitive PATH_INFO * (works in common case where mod_alias, mod_magnet, and other modules * have not modified the PATH_INFO portion of request URI, or did so * with exactly the PATH_INFO desired) */ buffer_copy_string_len(&r->pathinfo, r->target.ptr + reqlen - len, len); } else { buffer_copy_string_len(&r->pathinfo, pathinfo, len); } /* * shorten uri.path */ buffer_string_set_length(&r->uri.path, buffer_string_length(&r->uri.path) - len); buffer_string_set_length(&r->physical.path, (size_t)(pathinfo - r->physical.path.ptr)); } } if (!r->conf.follow_symlink && 0 != stat_cache_path_contains_symlink(&r->physical.path, r->conf.errh)) { return http_response_physical_path_error(r, 403, "-- access denied due to symlink restriction"); } /* r->tmp_sce is valid in handle_subrequest_start callback -- * handle_subrquest_start callbacks should not change r->physical.path * (or should invalidate r->tmp_sce). 
r->tmp_sce is not reset between * requests and is valid only for sequential code after this func succeeds*/ r->tmp_sce = sce; if (S_ISREG(sce->st.st_mode)) /*(common case)*/ return HANDLER_GO_ON; if (S_ISDIR(sce->st.st_mode)) { if (!buffer_has_slash_suffix(&r->uri.path)) { http_response_redirect_to_directory(r, 301); return HANDLER_FINISHED; } } else { /* any special handling of other non-reg files ?*/ } return HANDLER_GO_ON; } __attribute_cold__ __attribute_noinline__ static handler_t http_status_set_error_close (request_st * const r, int status) { r->keep_alive = 0; r->resp_body_finished = 1; r->handler_module = NULL; r->http_status = status; return HANDLER_FINISHED; } __attribute_cold__ static handler_t http_response_prepare_options_star (request_st * const r) { r->http_status = 200; r->resp_body_finished = 1; http_header_response_append(r, HTTP_HEADER_ALLOW, CONST_STR_LEN("Allow"), CONST_STR_LEN("OPTIONS, GET, HEAD, POST")); return HANDLER_FINISHED; } __attribute_cold__ static handler_t http_response_prepare_connect (request_st * const r) { return (r->handler_module) ? HANDLER_GO_ON : http_status_set_error_close(r, 405);/* 405 Method Not Allowed */ } static handler_t http_response_config (request_st * const r) { config_cond_cache_reset(r); config_patch_config(r); /* do we have to downgrade from 1.1 to 1.0 ? 
(ignore for HTTP/2) */ if (__builtin_expect( (!r->conf.allow_http11), 0) && r->http_version == HTTP_VERSION_1_1) r->http_version = HTTP_VERSION_1_0; if (__builtin_expect( (r->reqbody_length > 0), 0) && 0 != r->conf.max_request_size /* r->conf.max_request_size in kB */ && (off_t)r->reqbody_length > ((off_t)r->conf.max_request_size << 10)) { log_error(r->conf.errh, __FILE__, __LINE__, "request-size too long: %lld -> 413", (long long) r->reqbody_length); return /* 413 Payload Too Large */ http_status_set_error_close(r, 413); } return HANDLER_GO_ON; } __attribute_cold__ static handler_t http_response_comeback (request_st * const r); static handler_t http_response_prepare (request_st * const r) { handler_t rc; do { /* looks like someone has already made a decision */ if (r->http_status != 0 && r->http_status != 200) { if (0 == r->resp_body_finished) http_response_body_clear(r, 0); return HANDLER_FINISHED; } /* no decision yet, build conf->filename */ if (buffer_is_empty(&r->physical.path)) { if (__builtin_expect( (!r->async_callback), 1)) { rc = http_response_config(r); if (HANDLER_GO_ON != rc) continue; } else r->async_callback = 0; /* reset */ /* we only come here when we have the parse the full request again * * a HANDLER_COMEBACK from mod_rewrite and mod_fastcgi might be a * problem here as mod_setenv might get called multiple times * * fastcgi-auth might lead to a COMEBACK too * fastcgi again dead server too */ if (r->conf.log_request_handling) { log_error(r->conf.errh, __FILE__, __LINE__, "-- parsed Request-URI"); log_error(r->conf.errh, __FILE__, __LINE__, "Request-URI : %s", r->target.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "URI-scheme : %s", r->uri.scheme.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "URI-authority : %s", r->uri.authority.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "URI-path (clean): %s", r->uri.path.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "URI-query : %.*s", BUFFER_INTLEN_PTR(&r->uri.query)); } /** * * call 
plugins * * - based on the raw URL * */ rc = plugins_call_handle_uri_raw(r); if (HANDLER_GO_ON != rc) continue; /** * * call plugins * * - based on the clean URL * */ rc = plugins_call_handle_uri_clean(r); if (HANDLER_GO_ON != rc) continue; if (__builtin_expect( (r->http_method == HTTP_METHOD_OPTIONS), 0) && r->uri.path.ptr[0] == '*' && r->uri.path.ptr[1] == '\0') return http_response_prepare_options_star(r); if (__builtin_expect( (r->http_method == HTTP_METHOD_CONNECT), 0)) return http_response_prepare_connect(r); /*** * * border * * logical filename (URI) becomes a physical filename here * * * */ /* 1. stat() * ... ISREG() -> ok, go on * ... ISDIR() -> index-file -> redirect * * 2. pathinfo() * ... ISREG() * * 3. -> 404 * */ /* * SEARCH DOCUMENT ROOT */ /* set a default */ buffer_copy_buffer(&r->physical.doc_root, r->conf.document_root); buffer_copy_buffer(&r->physical.rel_path, &r->uri.path); #if defined(__WIN32) || defined(__CYGWIN__) /* strip dots from the end and spaces * * windows/dos handle those filenames as the same file * * foo == foo. == foo..... == "foo... " == "foo.. ./" * * This will affect in some cases PATHINFO * * on native windows we could prepend the filename with \\?\ to circumvent * this behaviour. I have no idea how to push this through cygwin * * */ if (!buffer_string_is_empty(&r->physical.rel_path)) { buffer *b = &r->physical.rel_path; size_t len = buffer_string_length(b); /* strip trailing " /" or "./" once */ if (len > 1 && b->ptr[len - 1] == '/' && (b->ptr[len - 2] == ' ' || b->ptr[len - 2] == '.')) { len -= 2; } /* strip all trailing " " and "." */ while (len > 0 && ( ' ' == b->ptr[len-1] || '.' 
== b->ptr[len-1] ) ) --len; buffer_string_set_length(b, len); } #endif if (r->conf.log_request_handling) { log_error(r->conf.errh, __FILE__, __LINE__, "-- before doc_root"); log_error(r->conf.errh, __FILE__, __LINE__, "Doc-Root : %s", r->physical.doc_root.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "Rel-Path : %s", r->physical.rel_path.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "Path : %s", r->physical.path.ptr); } /* the docroot plugin should set the doc_root and might also set the physical.path * for us (all vhost-plugins are supposed to set the doc_root) * */ rc = plugins_call_handle_docroot(r); if (HANDLER_GO_ON != rc) continue; /* MacOS X and Windows can't distinguish between upper and lower-case * * convert to lower-case */ if (r->conf.force_lowercase_filenames) { buffer_to_lower(&r->physical.rel_path); } /* the docroot plugins might set the servername, if they don't we take http-host */ if (buffer_string_is_empty(r->server_name)) { r->server_name = &r->uri.authority; } /** * create physical filename * -> physical.path = docroot + rel_path * */ buffer_copy_buffer(&r->physical.basedir, &r->physical.doc_root); buffer_copy_path_len2(&r->physical.path, CONST_BUF_LEN(&r->physical.doc_root), CONST_BUF_LEN(&r->physical.rel_path)); if (r->conf.log_request_handling) { log_error(r->conf.errh, __FILE__, __LINE__, "-- after doc_root"); log_error(r->conf.errh, __FILE__, __LINE__, "Doc-Root : %s", r->physical.doc_root.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "Rel-Path : %s", r->physical.rel_path.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "Path : %s", r->physical.path.ptr); } rc = plugins_call_handle_physical(r); if (HANDLER_GO_ON != rc) continue; if (r->conf.log_request_handling) { log_error(r->conf.errh, __FILE__, __LINE__, "-- logical -> physical"); log_error(r->conf.errh, __FILE__, __LINE__, "Doc-Root : %s", r->physical.doc_root.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "Basedir : %s", r->physical.basedir.ptr); 
log_error(r->conf.errh, __FILE__, __LINE__, "Rel-Path : %s", r->physical.rel_path.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "Path : %s", r->physical.path.ptr); } } if (NULL != r->handler_module) return HANDLER_GO_ON; /* * No module grabbed the request yet (like mod_access) * * Go on and check if the file exists at all */ if (r->conf.log_request_handling) { log_error(r->conf.errh, __FILE__, __LINE__, "-- handling physical path"); log_error(r->conf.errh, __FILE__, __LINE__, "Path : %s", r->physical.path.ptr); } rc = http_response_physical_path_check(r); if (HANDLER_GO_ON != rc) continue; /* r->physical.path is non-empty and exists in the filesystem */ if (r->conf.log_request_handling) { log_error(r->conf.errh, __FILE__, __LINE__, "-- handling subrequest"); log_error(r->conf.errh, __FILE__, __LINE__, "Path : %s", r->physical.path.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "URI : %s", r->uri.path.ptr); log_error(r->conf.errh, __FILE__, __LINE__, "Pathinfo : %s", r->pathinfo.ptr); } /* call the handlers */ rc = plugins_call_handle_subrequest_start(r); if (HANDLER_GO_ON != rc) continue; /* if we are still here, no one wanted the file, status 403 is ok I think */ if (NULL == r->handler_module && 0 == r->http_status) { r->http_status = (r->http_method != HTTP_METHOD_OPTIONS) ? 
403 : 200; return HANDLER_FINISHED; } return HANDLER_GO_ON; } while (HANDLER_COMEBACK == rc && HANDLER_GO_ON ==(rc = http_response_comeback(r))); return rc; } __attribute_cold__ static handler_t http_response_comeback (request_st * const r) { if (NULL != r->handler_module || !buffer_is_empty(&r->physical.path)) return HANDLER_GO_ON; config_reset_config(r); buffer_copy_buffer(&r->uri.authority,r->http_host);/*copy even if NULL*/ buffer_to_lower(&r->uri.authority); int status = http_request_parse_target(r, r->con->proto_default_port); if (0 == status) { r->conditional_is_valid = (1 << COMP_SERVER_SOCKET) | (1 << COMP_HTTP_SCHEME) | (1 << COMP_HTTP_HOST) | (1 << COMP_HTTP_REMOTE_IP) | (1 << COMP_HTTP_REQUEST_METHOD) | (1 << COMP_HTTP_URL) | (1 << COMP_HTTP_QUERY_STRING) | (1 << COMP_HTTP_REQUEST_HEADER); return HANDLER_GO_ON; } else { r->conditional_is_valid = (1 << COMP_SERVER_SOCKET) | (1 << COMP_HTTP_REMOTE_IP); config_cond_cache_reset(r); return http_status_set_error_close(r, status); } } __attribute_cold__ static void http_response_errdoc_init (request_st * const r) { /* modules that produce headers required with error response should * typically also produce an error document. Make an exception for * mod_auth WWW-Authenticate response header. */ buffer *www_auth = NULL; if (401 == r->http_status) { const buffer * const vb = http_header_response_get(r, HTTP_HEADER_WWW_AUTHENTICATE, CONST_STR_LEN("WWW-Authenticate")); if (NULL != vb) www_auth = buffer_init_buffer(vb); } buffer_reset(&r->physical.path); r->resp_htags = 0; array_reset_data_strings(&r->resp_headers); http_response_body_clear(r, 0); if (NULL != www_auth) { http_header_response_set(r, HTTP_HEADER_WWW_AUTHENTICATE, CONST_STR_LEN("WWW-Authenticate"), CONST_BUF_LEN(www_auth)); buffer_free(www_auth); } } __attribute_cold__ static void http_response_static_errdoc (request_st * const r) { if (NULL == r->handler_module ? 
r->error_handler_saved_status >= 65535 : (!r->conf.error_intercept || r->error_handler_saved_status)) return; http_response_errdoc_init(r); r->resp_body_finished = 1; /* try to send static errorfile */ if (!buffer_string_is_empty(r->conf.errorfile_prefix)) { buffer_copy_buffer(&r->physical.path, r->conf.errorfile_prefix); buffer_append_int(&r->physical.path, r->http_status); buffer_append_string_len(&r->physical.path, CONST_STR_LEN(".html")); stat_cache_entry *sce = stat_cache_get_entry_open(&r->physical.path, r->conf.follow_symlink); if (sce && 0 == http_chunk_append_file_ref(r, sce)) { const buffer *content_type = stat_cache_content_type_get(sce, r); if (content_type) http_header_response_set(r, HTTP_HEADER_CONTENT_TYPE, CONST_STR_LEN("Content-Type"), CONST_BUF_LEN(content_type)); return; } buffer_clear(&r->physical.path); } /* build default error-page */ buffer * const b = chunkqueue_append_buffer_open(&r->write_queue); buffer_copy_string_len(b, CONST_STR_LEN( "<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n" "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n" " \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n" "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n" " <head>\n" " <title>")); http_status_append(b, r->http_status); buffer_append_string_len(b, CONST_STR_LEN( "</title>\n" " </head>\n" " <body>\n" " <h1>")); http_status_append(b, r->http_status); buffer_append_string_len(b, CONST_STR_LEN( "</h1>\n" " </body>\n" "</html>\n")); chunkqueue_append_buffer_commit(&r->write_queue); http_header_response_set(r, HTTP_HEADER_CONTENT_TYPE, CONST_STR_LEN("Content-Type"), CONST_STR_LEN("text/html")); } __attribute_cold__ static void http_response_merge_trailers (request_st * const r) { /* attempt to merge trailers into headers; header not yet sent by caller */ if (buffer_string_is_empty(&r->gw_dechunk->b)) return; const int done = r->gw_dechunk->done; if (!done) return; /* XXX: !done; could scan for '\n' and 
send only those */ /* do not include trailers if success status (when response was read from * backend) subsequently changed to error status. http_chunk could add the * trailers, but such actions are better on a different code layer than in * http_chunk.c */ if (done < 400 && r->http_status >= 400) return; /* XXX: trailers passed through; no sanity check currently done * https://tools.ietf.org/html/rfc7230#section-4.1.2 * * Not checking for disallowed fields * Not handling (deprecated) line wrapping * Not strictly checking fields */ const char *k = strchr(r->gw_dechunk->b.ptr, '\n'); /*(skip final chunk)*/ if (NULL == k) return; /*(should not happen)*/ ++k; for (const char *v, *e; (e = strchr(k, '\n')); k = e+1) { v = memchr(k, ':', (size_t)(e - k)); if (NULL == v || v == k || *k == ' ' || *k == '\t') continue; uint32_t klen = (uint32_t)(v - k); do { ++v; } while (*v == ' ' || *v == '\t'); if (*v == '\r' || *v == '\n') continue; enum http_header_e id = http_header_hkey_get(k, klen); http_header_response_insert(r, id, k, klen, v, (size_t)(e - v)); } http_header_response_unset(r, HTTP_HEADER_OTHER, CONST_STR_LEN("Trailer")); buffer_clear(&r->gw_dechunk->b); } static handler_t http_response_write_prepare(request_st * const r) { if (NULL == r->handler_module) { /* static files */ switch(r->http_method) { case HTTP_METHOD_GET: case HTTP_METHOD_POST: case HTTP_METHOD_HEAD: break; case HTTP_METHOD_OPTIONS: if ((!r->http_status || r->http_status == 200) && !buffer_string_is_empty(&r->uri.path) && r->uri.path.ptr[0] != '*') { http_response_body_clear(r, 0); http_response_prepare_options_star(r); /*(treat like "*")*/ } break; default: if (r->http_status == 0) r->http_status = 501; break; } } switch (r->http_status) { case 200: /* common case */ break; case 204: /* class: header only */ case 205: case 304: /* disable chunked encoding again as we have no body */ http_response_body_clear(r, 1); /* no Content-Body, no Content-Length */ http_header_response_unset(r, 
HTTP_HEADER_CONTENT_LENGTH, CONST_STR_LEN("Content-Length")); r->resp_body_finished = 1; break; default: /* class: header + body */ if (r->http_status == 0) r->http_status = 403; /* only custom body for 4xx and 5xx */ if (r->http_status >= 400 && r->http_status < 600) http_response_static_errdoc(r); break; } if (r->gw_dechunk) http_response_merge_trailers(r); /* Allow filter plugins to change response headers */ switch (plugins_call_handle_response_start(r)) { case HANDLER_GO_ON: case HANDLER_FINISHED: break; default: log_error(r->conf.errh, __FILE__, __LINE__, "response_start plugin failed"); return HANDLER_ERROR; } if (r->resp_body_finished) { /* check for Range request (current impl requires resp_body_finished) */ if (r->conf.range_requests && http_range_rfc7233(r) >= 400) http_response_static_errdoc(r); /* 416 Range Not Satisfiable */ /* set content-length if length is known and not already set */ if (!(r->resp_htags & (light_bshift(HTTP_HEADER_CONTENT_LENGTH) |light_bshift(HTTP_HEADER_TRANSFER_ENCODING)))) { off_t qlen = chunkqueue_length(&r->write_queue); /** * The Content-Length header can only be sent if we have content: * - HEAD does not have a content-body (but can have content-length) * - 1xx, 204 and 304 does not have a content-body * (RFC 2616 Section 4.3) * * Otherwise generate a Content-Length header * (if chunked encoding is not available) * * (should not reach here if 1xx (r->http_status < 200)) */ if (qlen > 0) { buffer_append_int( http_header_response_set_ptr(r, HTTP_HEADER_CONTENT_LENGTH, CONST_STR_LEN("Content-Length")), qlen); } else if (r->http_method != HTTP_METHOD_HEAD && r->http_status != 204 && r->http_status != 304) { /* Content-Length: 0 is important for Redirects (301, ...) as * there might be content. 
*/ http_header_response_set(r, HTTP_HEADER_CONTENT_LENGTH, CONST_STR_LEN("Content-Length"), CONST_STR_LEN("0")); } } } else if (r->http_version == HTTP_VERSION_2) { /* handled by HTTP/2 framing */ } else { /** * response is not yet finished, but we have all headers * * keep-alive requires one of: * - Content-Length: ... (HTTP/1.0 and HTTP/1.0) * - Transfer-Encoding: chunked (HTTP/1.1) * - Upgrade: ... (lighttpd then acts as transparent proxy) */ if (!(r->resp_htags & (light_bshift(HTTP_HEADER_CONTENT_LENGTH) |light_bshift(HTTP_HEADER_TRANSFER_ENCODING) |light_bshift(HTTP_HEADER_UPGRADE)))) { if (r->http_method == HTTP_METHOD_CONNECT && r->http_status == 200){ /*(no transfer-encoding if successful CONNECT)*/ } else if (r->http_version == HTTP_VERSION_1_1) { off_t qlen = chunkqueue_length(&r->write_queue); r->resp_send_chunked = 1; if (qlen) { /* create initial Transfer-Encoding: chunked segment */ buffer * const b = chunkqueue_prepend_buffer_open(&r->write_queue); if (r->resp_decode_chunked && 0 != r->gw_dechunk->gw_chunked) { /*(reconstitute initial partially-decoded chunk)*/ off_t gw_chunked = r->gw_dechunk->gw_chunked; if (gw_chunked > 2) qlen += gw_chunked - 2; else if (1 == gw_chunked) chunkqueue_append_mem(&r->write_queue, CONST_STR_LEN("\r")); } else { chunkqueue_append_mem(&r->write_queue, CONST_STR_LEN("\r\n")); } buffer_append_uint_hex(b, (uintmax_t)qlen); buffer_append_string_len(b, CONST_STR_LEN("\r\n")); chunkqueue_prepend_buffer_commit(&r->write_queue); } http_header_response_append(r, HTTP_HEADER_TRANSFER_ENCODING, CONST_STR_LEN("Transfer-Encoding"), CONST_STR_LEN("chunked")); } else { /* if (r->http_version == HTTP_VERSION_1_0) */ r->keep_alive = 0; } } } if (r->http_method == HTTP_METHOD_HEAD) { /* HEAD request is like a GET, but without the content */ http_response_body_clear(r, 1); r->resp_body_finished = 1; } return HANDLER_GO_ON; } __attribute_cold__ static handler_t http_response_call_error_handler (request_st * const r, const buffer * const 
error_handler) { /* call error-handler */ /* set REDIRECT_STATUS to save current HTTP status code * for access by dynamic handlers * https://redmine.lighttpd.net/issues/1828 */ buffer_append_int( http_header_env_set_ptr(r, CONST_STR_LEN("REDIRECT_STATUS")), r->http_status); if (error_handler == r->conf.error_handler) { plugins_call_handle_request_reset(r); if (r->reqbody_length) { if (r->reqbody_length != r->reqbody_queue.bytes_in) r->keep_alive = 0; r->reqbody_length = 0; chunkqueue_reset(&r->reqbody_queue); } r->con->is_writable = 1; r->resp_body_finished = 0; r->resp_body_started = 0; r->error_handler_saved_status = r->http_status; r->error_handler_saved_method = r->http_method; r->http_method = HTTP_METHOD_GET; } else { /*(preserve behavior for server.error-handler-404)*/ /*(negative to flag old behavior)*/ r->error_handler_saved_status = -r->http_status; } if (r->http_version == HTTP_VERSION_UNSET) r->http_version = HTTP_VERSION_1_0; buffer_copy_buffer(&r->target, error_handler); http_response_errdoc_init(r); r->http_status = 0; /*(after http_response_errdoc_init())*/ http_response_comeback(r); return HANDLER_COMEBACK; } handler_t http_response_handler (request_st * const r) { const plugin *p = r->handler_module; int rc; if (NULL != p || ((rc = http_response_prepare(r)) == HANDLER_GO_ON && NULL != (p = r->handler_module))) rc = p->handle_subrequest(r, p->data); switch (rc) { case HANDLER_WAIT_FOR_EVENT: if (!r->resp_body_finished && (!r->resp_body_started || 0 == (r->conf.stream_response_body & (FDEVENT_STREAM_RESPONSE |FDEVENT_STREAM_RESPONSE_BUFMIN)))) return HANDLER_WAIT_FOR_EVENT; /* come back here */ /* response headers received from backend; start response */ __attribute_fallthrough__ case HANDLER_GO_ON: case HANDLER_FINISHED: /*(HANDLER_FINISHED if request not handled)*/ if (r->http_status == 0) r->http_status = 200; if (r->error_handler_saved_status > 0) r->http_method = r->error_handler_saved_method; if (NULL == r->handler_module || 
r->conf.error_intercept) { if (__builtin_expect( (r->error_handler_saved_status), 0)) { const int subreq_status = r->http_status; if (r->error_handler_saved_status > 0) r->http_status = r->error_handler_saved_status; else if (r->http_status == 404 || r->http_status == 403) /* error-handler-404 is a 404 */ r->http_status = -r->error_handler_saved_status; else { /* error-handler-404 is back and has generated content */ /* if Status: was set, take it otherwise use 200 */ } if (200 <= subreq_status && subreq_status <= 299) { /*(flag value to indicate that error handler succeeded) *(for (NULL == r->handler_module))*/ r->error_handler_saved_status = 65535; /* >= 1000 */ } } else if (__builtin_expect( (r->http_status >= 400), 0)) { const buffer *error_handler = NULL; if (!buffer_string_is_empty(r->conf.error_handler)) error_handler = r->conf.error_handler; else if ((r->http_status == 404 || r->http_status == 403) && !buffer_string_is_empty(r->conf.error_handler_404)) error_handler = r->conf.error_handler_404; if (error_handler) return http_response_call_error_handler(r, error_handler); } } /* we have something to send; go on */ /*(CON_STATE_RESPONSE_START; transient state)*/ return http_response_write_prepare(r); case HANDLER_WAIT_FOR_FD: return HANDLER_WAIT_FOR_FD; case HANDLER_COMEBACK: http_response_comeback(r); return HANDLER_COMEBACK; /*case HANDLER_ERROR:*/ default: return HANDLER_ERROR; /* something went wrong */ } }
835307.c
/* APPLE LOCAL file v7 merge */
/* Test the `vcequ16' ARM Neon intrinsic. */
/* This file was autogenerated by neon-testgen. */

/* { dg-do assemble } */
/* { dg-require-effective-target arm_neon_ok } */
/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */

#include "arm_neon.h"

/* Instruction-selection test only: feed vceq_u16 two (deliberately
   uninitialized) vectors so the dg-final scan-assembler directive below
   can verify that a vceq.i16 instruction is emitted.  The result is
   never read, so the uninitialized operands are harmless here; the test
   is assembled (dg-do assemble), not executed.  */
void test_vcequ16 (void)
{
  uint16x4_t out_uint16x4_t;
  uint16x4_t arg0_uint16x4_t;
  uint16x4_t arg1_uint16x4_t;

  out_uint16x4_t = vceq_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
}

/* { dg-final { scan-assembler "vceq\.i16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
/* { dg-final { cleanup-saved-temps } } */
99983.c
/****************************************************************************** * * Copyright (C) 2009-2013 Broadcom Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ #include <string.h> #include "bt_target.h" #include "bt_utils.h" #include "gap_int.h" tGAP_CB gap_cb; /******************************************************************************* ** ** Function GAP_SetTraceLevel ** ** Description This function sets the trace level for GAP. If called with ** a value of 0xFF, it simply returns the current trace level. ** ** Returns The new or current trace level ** *******************************************************************************/ UINT8 GAP_SetTraceLevel (UINT8 new_level) { if (new_level != 0xFF) gap_cb.trace_level = new_level; return (gap_cb.trace_level); } /******************************************************************************* ** ** Function GAP_Init ** ** Description Initializes the control blocks used by GAP. ** ** This routine should not be called except once per ** stack invocation. 
** ** Returns Nothing ** *******************************************************************************/ void GAP_Init(void) { memset (&gap_cb, 0, sizeof (tGAP_CB)); #if defined(GAP_INITIAL_TRACE_LEVEL) gap_cb.trace_level = GAP_INITIAL_TRACE_LEVEL; #else gap_cb.trace_level = BT_TRACE_LEVEL_NONE; /* No traces */ #endif #if GAP_CONN_INCLUDED == TRUE gap_conn_init(); #endif #if BLE_INCLUDED == TRUE gap_attr_db_init(); #endif }
11687.c
/* $NetBSD: raw_ip6.c,v 1.105 2009/09/16 15:23:05 pooka Exp $ */ /* $KAME: raw_ip6.c,v 1.82 2001/07/23 18:57:56 jinmei Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Copyright (c) 1982, 1986, 1988, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
 *
 *	@(#)raw_ip.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: raw_ip6.c,v 1.105 2009/09/16 15:23:05 pooka Exp $");

#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kauth.h>

#include <net/if.h>
#include <net/route.h>
#include <net/if_types.h>
#include <net/net_stats.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/ip6_private.h>
#include <netinet6/ip6_mroute.h>
#include <netinet/icmp6.h>
#include <netinet6/icmp6_private.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6protosw.h>
#include <netinet6/scope6_var.h>
#include <netinet6/raw_ip6.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec_private.h>
#endif /* IPSEC */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec_var.h>
#include <netipsec/ipsec_private.h>
#include <netipsec/ipsec6.h>
#endif

#include "faith.h"
#if defined(NFAITH) && 0 < NFAITH
#include <net/if_faith.h>
#endif

extern struct inpcbtable rawcbtable;
/* Table of all raw IPv6 protocol control blocks; initialized below. */
struct inpcbtable raw6cbtable;

#define ifatoia6(ifa)	((struct in6_ifaddr *)(ifa))

/*
 * Raw interface to IP6 protocol.
 */

/* Per-cpu RIP6_STAT_* counters, allocated in rip6_init() and exported
 * through the net.inet6.raw6 "stats" sysctl node. */
static percpu_t *rip6stat_percpu;

#define	RIP6_STATINC(x)		_NET_STATINC(rip6stat_percpu, x)

static void sysctl_net_inet6_raw6_setup(struct sysctllog **);

/*
 * Initialize raw connection block queue.
 */
void
rip6_init(void)
{

	/* Register sysctl nodes, set up the PCB table, then allocate
	 * the per-cpu statistics counters. */
	sysctl_net_inet6_raw6_setup(NULL);
	in6_pcbinit(&raw6cbtable, 1, 1);

	rip6stat_percpu = percpu_alloc(sizeof(uint64_t) * RIP6_NSTATS);
}

/*
 * Setup generic address and protocol structures
 * for raw_input routine, then pass them along with
 * mbuf chain.
*/ int rip6_input(struct mbuf **mp, int *offp, int proto) { struct mbuf *m = *mp; struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); struct inpcb_hdr *inph; struct in6pcb *in6p; struct in6pcb *last = NULL; struct sockaddr_in6 rip6src; struct mbuf *opts = NULL; RIP6_STATINC(RIP6_STAT_IPACKETS); #if defined(NFAITH) && 0 < NFAITH if (faithprefix(&ip6->ip6_dst)) { /* send icmp6 host unreach? */ m_freem(m); return IPPROTO_DONE; } #endif /* Be proactive about malicious use of IPv4 mapped address */ if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) || IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) { /* XXX stat */ m_freem(m); return IPPROTO_DONE; } sockaddr_in6_init(&rip6src, &ip6->ip6_src, 0, 0, 0); if (sa6_recoverscope(&rip6src) != 0) { /* XXX: should be impossible. */ m_freem(m); return IPPROTO_DONE; } CIRCLEQ_FOREACH(inph, &raw6cbtable.inpt_queue, inph_queue) { in6p = (struct in6pcb *)inph; if (in6p->in6p_af != AF_INET6) continue; if (in6p->in6p_ip6.ip6_nxt && in6p->in6p_ip6.ip6_nxt != proto) continue; if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr) && !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst)) continue; if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) && !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src)) continue; if (in6p->in6p_cksum != -1) { RIP6_STATINC(RIP6_STAT_ISUM); if (in6_cksum(m, proto, *offp, m->m_pkthdr.len - *offp)) { RIP6_STATINC(RIP6_STAT_BADSUM); continue; } } if (last) { struct mbuf *n; #ifdef IPSEC /* * Check AH/ESP integrity. 
*/ if (ipsec6_in_reject(m, last)) { IPSEC6_STATINC(IPSEC_STAT_IN_INVAL); /* do not inject data into pcb */ } else #endif /* IPSEC */ #ifdef FAST_IPSEC /* * Check AH/ESP integrity */ if (!ipsec6_in_reject(m,last)) #endif /* FAST_IPSEC */ if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL) { if (last->in6p_flags & IN6P_CONTROLOPTS) ip6_savecontrol(last, &opts, ip6, n); /* strip intermediate headers */ m_adj(n, *offp); if (sbappendaddr(&last->in6p_socket->so_rcv, (struct sockaddr *)&rip6src, n, opts) == 0) { /* should notify about lost packet */ m_freem(n); if (opts) m_freem(opts); RIP6_STATINC(RIP6_STAT_FULLSOCK); } else sorwakeup(last->in6p_socket); opts = NULL; } } last = in6p; } #ifdef IPSEC /* * Check AH/ESP integrity. */ if (last && ipsec6_in_reject(m, last)) { m_freem(m); IPSEC6_STATINC(IPSEC_STAT_IN_INVAL); IP6_STATDEC(IP6_STAT_DELIVERED); /* do not inject data into pcb */ } else #endif /* IPSEC */ #ifdef FAST_IPSEC if (last && ipsec6_in_reject(m, last)) { m_freem(m); /* * XXX ipsec6_in_reject update stat if there is an error * so we just need to update stats by hand in the case of last is * NULL */ if (!last) IPSEC6_STATINC(IPSEC_STAT_IN_POLVIO); IP6_STATDEC(IP6_STAT_DELIVERED); /* do not inject data into pcb */ } else #endif /* FAST_IPSEC */ if (last) { if (last->in6p_flags & IN6P_CONTROLOPTS) ip6_savecontrol(last, &opts, ip6, m); /* strip intermediate headers */ m_adj(m, *offp); if (sbappendaddr(&last->in6p_socket->so_rcv, (struct sockaddr *)&rip6src, m, opts) == 0) { m_freem(m); if (opts) m_freem(opts); RIP6_STATINC(RIP6_STAT_FULLSOCK); } else sorwakeup(last->in6p_socket); } else { RIP6_STATINC(RIP6_STAT_NOSOCK); if (m->m_flags & M_MCAST) RIP6_STATINC(RIP6_STAT_NOSOCKMCAST); if (proto == IPPROTO_NONE) m_freem(m); else { u_int8_t *prvnxtp = ip6_get_prevhdr(m, *offp); /* XXX */ in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_protounknown); icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_NEXTHEADER, prvnxtp - mtod(m, u_int8_t *)); } IP6_STATDEC(IP6_STAT_DELIVERED); } 
return IPPROTO_DONE; } void * rip6_ctlinput(int cmd, const struct sockaddr *sa, void *d) { struct ip6_hdr *ip6; struct ip6ctlparam *ip6cp = NULL; const struct sockaddr_in6 *sa6_src = NULL; void *cmdarg; void (*notify)(struct in6pcb *, int) = in6_rtchange; int nxt; if (sa->sa_family != AF_INET6 || sa->sa_len != sizeof(struct sockaddr_in6)) return NULL; if ((unsigned)cmd >= PRC_NCMDS) return NULL; if (PRC_IS_REDIRECT(cmd)) notify = in6_rtchange, d = NULL; else if (cmd == PRC_HOSTDEAD) d = NULL; else if (cmd == PRC_MSGSIZE) ; /* special code is present, see below */ else if (inet6ctlerrmap[cmd] == 0) return NULL; /* if the parameter is from icmp6, decode it. */ if (d != NULL) { ip6cp = (struct ip6ctlparam *)d; ip6 = ip6cp->ip6c_ip6; cmdarg = ip6cp->ip6c_cmdarg; sa6_src = ip6cp->ip6c_src; nxt = ip6cp->ip6c_nxt; } else { ip6 = NULL; cmdarg = NULL; sa6_src = &sa6_any; nxt = -1; } if (ip6 && cmd == PRC_MSGSIZE) { const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *)sa; int valid = 0; struct in6pcb *in6p; /* * Check to see if we have a valid raw IPv6 socket * corresponding to the address in the ICMPv6 message * payload, and the protocol (ip6_nxt) meets the socket. * XXX chase extension headers, or pass final nxt value * from icmp6_notify_error() */ in6p = NULL; in6p = in6_pcblookup_connect(&raw6cbtable, &sa6->sin6_addr, 0, (const struct in6_addr *)&sa6_src->sin6_addr, 0, 0); #if 0 if (!in6p) { /* * As the use of sendto(2) is fairly popular, * we may want to allow non-connected pcb too. * But it could be too weak against attacks... * We should at least check if the local * address (= s) is really ours. */ in6p = in6_pcblookup_bind(&raw6cbtable, &sa6->sin6_addr, 0, 0); } #endif if (in6p && in6p->in6p_ip6.ip6_nxt && in6p->in6p_ip6.ip6_nxt == nxt) valid++; /* * Depending on the value of "valid" and routing table * size (mtudisc_{hi,lo}wat), we will: * - recalculate the new MTU and create the * corresponding routing entry, or * - ignore the MTU change notification. 
*/ icmp6_mtudisc_update((struct ip6ctlparam *)d, valid); /* * regardless of if we called icmp6_mtudisc_update(), * we need to call in6_pcbnotify(), to notify path MTU * change to the userland (RFC3542), because some * unconnected sockets may share the same destination * and want to know the path MTU. */ } (void) in6_pcbnotify(&raw6cbtable, sa, 0, (const struct sockaddr *)sa6_src, 0, cmd, cmdarg, notify); return NULL; } /* * Generate IPv6 header and pass packet to ip6_output. * Tack on options user may have setup with control call. */ int rip6_output(struct mbuf *m, struct socket *so, struct sockaddr_in6 *dstsock, struct mbuf *control) { struct in6_addr *dst; struct ip6_hdr *ip6; struct in6pcb *in6p; u_int plen = m->m_pkthdr.len; int error = 0; struct ip6_pktopts opt, *optp = NULL; struct ifnet *oifp = NULL; int type, code; /* for ICMPv6 output statistics only */ int scope_ambiguous = 0; struct in6_addr *in6a; in6p = sotoin6pcb(so); dst = &dstsock->sin6_addr; if (control) { if ((error = ip6_setpktopts(control, &opt, in6p->in6p_outputopts, kauth_cred_get(), so->so_proto->pr_protocol)) != 0) { goto bad; } optp = &opt; } else optp = in6p->in6p_outputopts; /* * Check and convert scope zone ID into internal form. * XXX: we may still need to determine the zone later. */ if (!(so->so_state & SS_ISCONNECTED)) { if (dstsock->sin6_scope_id == 0 && !ip6_use_defzone) scope_ambiguous = 1; if ((error = sa6_embedscope(dstsock, ip6_use_defzone)) != 0) goto bad; } /* * For an ICMPv6 packet, we should know its type and code * to update statistics. 
*/ if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) { struct icmp6_hdr *icmp6; if (m->m_len < sizeof(struct icmp6_hdr) && (m = m_pullup(m, sizeof(struct icmp6_hdr))) == NULL) { error = ENOBUFS; goto bad; } icmp6 = mtod(m, struct icmp6_hdr *); type = icmp6->icmp6_type; code = icmp6->icmp6_code; } else { type = 0; code = 0; } M_PREPEND(m, sizeof(*ip6), M_DONTWAIT); if (!m) { error = ENOBUFS; goto bad; } ip6 = mtod(m, struct ip6_hdr *); /* * Next header might not be ICMP6 but use its pseudo header anyway. */ ip6->ip6_dst = *dst; /* * Source address selection. */ if ((in6a = in6_selectsrc(dstsock, optp, in6p->in6p_moptions, (struct route *)&in6p->in6p_route, &in6p->in6p_laddr, &oifp, &error)) == 0) { if (error == 0) error = EADDRNOTAVAIL; goto bad; } ip6->ip6_src = *in6a; if (oifp && scope_ambiguous) { /* * Application should provide a proper zone ID or the use of * default zone IDs should be enabled. Unfortunately, some * applications do not behave as it should, so we need a * workaround. Even if an appropriate ID is not determined * (when it's required), if we can determine the outgoing * interface. determine the zone ID based on the interface. */ error = in6_setscope(&dstsock->sin6_addr, oifp, NULL); if (error != 0) goto bad; } ip6->ip6_dst = dstsock->sin6_addr; /* fill in the rest of the IPv6 header fields */ ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK; ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; /* ip6_plen will be filled in ip6_output, so not fill it here. 
*/ ip6->ip6_nxt = in6p->in6p_ip6.ip6_nxt; ip6->ip6_hlim = in6_selecthlim(in6p, oifp); if (so->so_proto->pr_protocol == IPPROTO_ICMPV6 || in6p->in6p_cksum != -1) { int off; u_int16_t sum; /* compute checksum */ if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) off = offsetof(struct icmp6_hdr, icmp6_cksum); else off = in6p->in6p_cksum; if (plen < off + 1) { error = EINVAL; goto bad; } off += sizeof(struct ip6_hdr); sum = 0; m = m_copyback_cow(m, off, sizeof(sum), (void *)&sum, M_DONTWAIT); if (m == NULL) { error = ENOBUFS; goto bad; } sum = in6_cksum(m, ip6->ip6_nxt, sizeof(*ip6), plen); m = m_copyback_cow(m, off, sizeof(sum), (void *)&sum, M_DONTWAIT); if (m == NULL) { error = ENOBUFS; goto bad; } } error = ip6_output(m, optp, &in6p->in6p_route, 0, in6p->in6p_moptions, so, &oifp); if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) { if (oifp) icmp6_ifoutstat_inc(oifp, type, code); ICMP6_STATINC(ICMP6_STAT_OUTHIST + type); } else RIP6_STATINC(RIP6_STAT_OPACKETS); goto freectl; bad: if (m) m_freem(m); freectl: if (control) { ip6_clearpktopts(&opt, -1); m_freem(control); } return error; } /* * Raw IPv6 socket option processing. */ int rip6_ctloutput(int op, struct socket *so, struct sockopt *sopt) { int error = 0; if (sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_NOHEADER) { int optval; /* need to fiddle w/ opt(IPPROTO_IPV6, IPV6_CHECKSUM)? 
*/ if (op == PRCO_GETOPT) { optval = 1; error = sockopt_set(sopt, &optval, sizeof(optval)); } else if (op == PRCO_SETOPT) { error = sockopt_getint(sopt, &optval); if (error) goto out; if (optval == 0) error = EINVAL; } goto out; } else if (sopt->sopt_level != IPPROTO_IPV6) return ip6_ctloutput(op, so, sopt); switch (sopt->sopt_name) { case MRT6_INIT: case MRT6_DONE: case MRT6_ADD_MIF: case MRT6_DEL_MIF: case MRT6_ADD_MFC: case MRT6_DEL_MFC: case MRT6_PIM: if (op == PRCO_SETOPT) error = ip6_mrouter_set(so, sopt); else if (op == PRCO_GETOPT) error = ip6_mrouter_get(so, sopt); else error = EINVAL; break; case IPV6_CHECKSUM: return ip6_raw_ctloutput(op, so, sopt); default: return ip6_ctloutput(op, so, sopt); } out: return error; } extern u_long rip6_sendspace; extern u_long rip6_recvspace; int rip6_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam, struct mbuf *control, struct lwp *l) { struct in6pcb *in6p = sotoin6pcb(so); int s; int error = 0; if (req == PRU_CONTROL) return in6_control(so, (u_long)m, (void *)nam, (struct ifnet *)control, l); if (req == PRU_PURGEIF) { mutex_enter(softnet_lock); in6_pcbpurgeif0(&raw6cbtable, (struct ifnet *)control); in6_purgeif((struct ifnet *)control); in6_pcbpurgeif(&raw6cbtable, (struct ifnet *)control); mutex_exit(softnet_lock); return 0; } switch (req) { case PRU_ATTACH: error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_SOCKET, KAUTH_REQ_NETWORK_SOCKET_RAWSOCK, NULL, NULL, NULL); sosetlock(so); if (in6p != NULL) panic("rip6_attach"); if (error) { break; } s = splsoftnet(); error = soreserve(so, rip6_sendspace, rip6_recvspace); if (error != 0) { splx(s); break; } if ((error = in6_pcballoc(so, &raw6cbtable)) != 0) { splx(s); break; } splx(s); in6p = sotoin6pcb(so); in6p->in6p_ip6.ip6_nxt = (long)nam; in6p->in6p_cksum = -1; in6p->in6p_icmp6filt = malloc(sizeof(struct icmp6_filter), M_PCB, M_NOWAIT); if (in6p->in6p_icmp6filt == NULL) { in6_pcbdetach(in6p); error = ENOMEM; break; } 
ICMP6_FILTER_SETPASSALL(in6p->in6p_icmp6filt); break; case PRU_DISCONNECT: if ((so->so_state & SS_ISCONNECTED) == 0) { error = ENOTCONN; break; } in6p->in6p_faddr = in6addr_any; so->so_state &= ~SS_ISCONNECTED; /* XXX */ break; case PRU_ABORT: soisdisconnected(so); /* Fallthrough */ case PRU_DETACH: if (in6p == NULL) panic("rip6_detach"); if (so == ip6_mrouter) ip6_mrouter_done(); /* xxx: RSVP */ if (in6p->in6p_icmp6filt != NULL) { free(in6p->in6p_icmp6filt, M_PCB); in6p->in6p_icmp6filt = NULL; } in6_pcbdetach(in6p); break; case PRU_BIND: { struct sockaddr_in6 *addr = mtod(nam, struct sockaddr_in6 *); struct ifaddr *ia = NULL; if (nam->m_len != sizeof(*addr)) { error = EINVAL; break; } if (TAILQ_EMPTY(&ifnet) || addr->sin6_family != AF_INET6) { error = EADDRNOTAVAIL; break; } if ((error = sa6_embedscope(addr, ip6_use_defzone)) != 0) break; /* * we don't support mapped address here, it would confuse * users so reject it */ if (IN6_IS_ADDR_V4MAPPED(&addr->sin6_addr)) { error = EADDRNOTAVAIL; break; } if (!IN6_IS_ADDR_UNSPECIFIED(&addr->sin6_addr) && (ia = ifa_ifwithaddr((struct sockaddr *)addr)) == 0) { error = EADDRNOTAVAIL; break; } if (ia && ((struct in6_ifaddr *)ia)->ia6_flags & (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY| IN6_IFF_DETACHED|IN6_IFF_DEPRECATED)) { error = EADDRNOTAVAIL; break; } in6p->in6p_laddr = addr->sin6_addr; break; } case PRU_CONNECT: { struct sockaddr_in6 *addr = mtod(nam, struct sockaddr_in6 *); struct in6_addr *in6a = NULL; struct ifnet *ifp = NULL; int scope_ambiguous = 0; if (nam->m_len != sizeof(*addr)) { error = EINVAL; break; } if (TAILQ_EMPTY(&ifnet)) { error = EADDRNOTAVAIL; break; } if (addr->sin6_family != AF_INET6) { error = EAFNOSUPPORT; break; } /* * Application should provide a proper zone ID or the use of * default zone IDs should be enabled. Unfortunately, some * applications do not behave as it should, so we need a * workaround. Even if an appropriate ID is not determined, * we'll see if we can determine the outgoing interface. 
If we * can, determine the zone ID based on the interface below. */ if (addr->sin6_scope_id == 0 && !ip6_use_defzone) scope_ambiguous = 1; if ((error = sa6_embedscope(addr, ip6_use_defzone)) != 0) return error; /* Source address selection. XXX: need pcblookup? */ in6a = in6_selectsrc(addr, in6p->in6p_outputopts, in6p->in6p_moptions, (struct route *)&in6p->in6p_route, &in6p->in6p_laddr, &ifp, &error); if (in6a == NULL) { if (error == 0) error = EADDRNOTAVAIL; break; } /* XXX: see above */ if (ifp && scope_ambiguous && (error = in6_setscope(&addr->sin6_addr, ifp, NULL)) != 0) { break; } in6p->in6p_laddr = *in6a; in6p->in6p_faddr = addr->sin6_addr; soisconnected(so); break; } case PRU_CONNECT2: error = EOPNOTSUPP; break; /* * Mark the connection as being incapable of futther input. */ case PRU_SHUTDOWN: socantsendmore(so); break; /* * Ship a packet out. The appropriate raw output * routine handles any messaging necessary. */ case PRU_SEND: { struct sockaddr_in6 tmp; struct sockaddr_in6 *dst; /* always copy sockaddr to avoid overwrites */ if (so->so_state & SS_ISCONNECTED) { if (nam) { error = EISCONN; break; } /* XXX */ sockaddr_in6_init(&tmp, &in6p->in6p_faddr, 0, 0, 0); dst = &tmp; } else { if (nam == NULL) { error = ENOTCONN; break; } if (nam->m_len != sizeof(tmp)) { error = EINVAL; break; } tmp = *mtod(nam, struct sockaddr_in6 *); dst = &tmp; if (dst->sin6_family != AF_INET6) { error = EAFNOSUPPORT; break; } } error = rip6_output(m, so, dst, control); m = NULL; break; } case PRU_SENSE: /* * stat: don't bother with a blocksize */ return 0; /* * Not supported. 
*/
	case PRU_RCVOOB:
	case PRU_RCVD:
	case PRU_LISTEN:
	case PRU_ACCEPT:
	case PRU_SENDOOB:
		/* None of these requests make sense on a raw IPv6 socket. */
		error = EOPNOTSUPP;
		break;

	case PRU_SOCKADDR:
		/* Report the locally bound address. */
		in6_setsockaddr(in6p, nam);
		break;

	case PRU_PEERADDR:
		/* Report the connected (foreign) address. */
		in6_setpeeraddr(in6p, nam);
		break;

	default:
		panic("rip6_usrreq");
	}
	/* Free the request mbuf unless a handler above consumed it (m is
	 * set to NULL by the paths that hand it off, e.g. rip6_output). */
	if (m != NULL)
		m_freem(m);
	return error;
}

/*
 * Sysctl handler exporting the per-CPU raw IPv6 statistics counters.
 */
static int
sysctl_net_inet6_raw6_stats(SYSCTLFN_ARGS)
{

	return (NETSTAT_SYSCTL(rip6stat_percpu, RIP6_NSTATS));
}

/*
 * Create the net.inet6.raw6 sysctl subtree: the "pcblist" node exposing
 * the raw6 PCB table and the "stats" node backed by the handler above.
 */
static void
sysctl_net_inet6_raw6_setup(struct sysctllog **clog)
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "net", NULL,
		       NULL, 0, NULL, 0,
		       CTL_NET, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "inet6", NULL,
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET6, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "raw6",
		       SYSCTL_DESCR("Raw IPv6 settings"),
		       NULL, 0, NULL, 0,
		       CTL_NET, PF_INET6, IPPROTO_RAW, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "pcblist",
		       SYSCTL_DESCR("Raw IPv6 control block list"),
		       sysctl_inpcblist, 0, &raw6cbtable, 0,
		       CTL_NET, PF_INET6, IPPROTO_RAW,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "stats",
		       SYSCTL_DESCR("Raw IPv6 statistics"),
		       sysctl_net_inet6_raw6_stats, 0, NULL, 0,
		       CTL_NET, PF_INET6, IPPROTO_RAW, RAW6CTL_STATS,
		       CTL_EOL);
}
360189.c
/** * Copyright (c) 2014 - 2017, Nordic Semiconductor ASA * * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form, except as embedded into a Nordic * Semiconductor ASA integrated circuit in a product or a software update for * such product, must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * * 3. Neither the name of Nordic Semiconductor ASA nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * 4. This software, with or without modification, must only be used with a * Nordic Semiconductor ASA integrated circuit. * * 5. Any software provided in binary form under this license must not be reverse * engineered, decompiled, modified and/or disassembled. * * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <stdbool.h> #include <stdint.h> #include <stddef.h> #include "ser_sd_transport.h" #include "ser_hal_transport.h" #include "nrf_error.h" #include "app_error.h" #include "ble_serialization.h" #include "ser_dbg_sd_str.h" #include "ser_app_power_system_off.h" #include "app_util.h" #define NRF_LOG_MODULE_NAME "SER_XFER" #include "nrf_log.h" /** SoftDevice event handler. */ static ser_sd_transport_evt_handler_t m_evt_handler = NULL; /** 'One time' handler called in task context while waiting for response to scheduled command. */ static ser_sd_transport_rsp_wait_handler_t m_ot_rsp_wait_handler = NULL; /** Handler called in task context while waiting for response to scheduled command. */ static ser_sd_transport_rsp_wait_handler_t m_os_rsp_wait_handler = NULL; /** Handler called in serial peripheral interrupt context when response is received. */ static ser_sd_transport_rsp_set_handler_t m_os_rsp_set_handler = NULL; /** Handler called when hal_transport notifies that packet reception has started. */ static ser_sd_transport_rx_notification_handler_t m_rx_notify_handler = NULL; /** User decoder handler for expected response packet. */ static ser_sd_transport_rsp_handler_t m_rsp_dec_handler = NULL; /** Flag indicated whether module is waiting for response packet. */ static volatile bool m_rsp_wait = false; /** SoftDevice call return value decoded by user decoder handler. */ static uint32_t m_return_value; /**@brief Function for handling the rx packets comming from hal_transport. * * @details * This function is called in serial peripheral interrupt context. Response packets are handled in * this context. Events are passed to the application and it is up to application in which context * they are handled. * * @param[in] p_data Pointer to received data. * @param[in] length Size of data. 
*/
static void ser_sd_transport_rx_packet_handler(uint8_t * p_data, uint16_t length)
{
    if (p_data && (length >= SER_PKT_TYPE_SIZE))
    {
        /* First byte of every packet is its type; strip it before decoding. */
        const uint8_t packet_type = p_data[SER_PKT_TYPE_POS];
        p_data += SER_PKT_TYPE_SIZE;
        length -= SER_PKT_TYPE_SIZE;

        switch (packet_type)
        {
            case SER_PKT_TYPE_RESP:
            case SER_PKT_TYPE_DTM_RESP:

                if (m_rsp_wait)
                {
                    /* Decode the response and stash the SoftDevice return
                     * value for the task blocked in cmd_write(). */
                    m_return_value = m_rsp_dec_handler(p_data, length);
                    (void)ser_sd_transport_rx_free(p_data);

                    /* Reset response flag - cmd_write function is pending on it.*/
                    m_rsp_wait = false;

                    /* If os handler is set, signal os that response has arrived.*/
                    if (m_os_rsp_set_handler)
                    {
                        m_os_rsp_set_handler();
                    }
                }
                else
                {
                    /* Unexpected packet: a response arrived with no command
                     * outstanding. */
                    (void)ser_sd_transport_rx_free(p_data);
                    APP_ERROR_HANDLER(packet_type);
                }
                break;

            case SER_PKT_TYPE_EVT:
                /* It is ensured during opening that handler is not NULL. No check needed. */
                NRF_LOG_DEBUG("[EVT]: %s \r\n", (uint32_t)ser_dbg_sd_evt_str_get(uint16_decode(&p_data[SER_EVT_ID_POS]))); // p_data points to EVT_ID
                m_evt_handler(p_data, length);
                break;

            default:
                /* Unknown packet type. */
                (void)ser_sd_transport_rx_free(p_data);
                APP_ERROR_HANDLER(packet_type);
                break;
        }
    }
}

/**@brief Function for handling the event from hal_transport.
 *
 * @param[in] event Event from hal_transport.
 */
static void ser_sd_transport_hal_handler(ser_hal_transport_evt_t event)
{
    switch (event.evt_type)
    {
        case SER_HAL_TRANSP_EVT_RX_PKT_RECEIVED:
            /* Complete packet received: dispatch to the rx packet handler. */
            ser_sd_transport_rx_packet_handler(event.evt_params.rx_pkt_received.p_buffer,
                                               event.evt_params.rx_pkt_received.num_of_bytes);
            break;

        case SER_HAL_TRANSP_EVT_RX_PKT_RECEIVING:
            /* Reception started: notify the application if it asked to be told. */
            if (m_rx_notify_handler)
            {
                m_rx_notify_handler();
            }
            break;

        case SER_HAL_TRANSP_EVT_TX_PKT_SENT:
            /* A deferred system-off request is honoured only after the last
             * TX packet has physically left the transport. */
            if (ser_app_power_system_off_get() == true)
            {
                ser_app_power_system_off_enter();
            }
            break;

        case SER_HAL_TRANSP_EVT_PHY_ERROR:
            if (m_rsp_wait)
            {
                /* PHY failure while a command is pending: fail the command
                 * with NRF_ERROR_INTERNAL instead of blocking forever. */
                m_return_value = NRF_ERROR_INTERNAL;

                /* Reset response flag - cmd_write function is pending on it.*/
                m_rsp_wait = false;

                /* If os handler is set, signal os that response has arrived.*/
                if (m_os_rsp_set_handler)
                {
                    m_os_rsp_set_handler();
                }
            }
            break;

        default:
            break;
    }
}

/* Open the transport: store the handlers and register our hal handler.
 * @a evt_handler is mandatory; the other handlers may be NULL. */
uint32_t ser_sd_transport_open(ser_sd_transport_evt_handler_t              evt_handler,
                               ser_sd_transport_rsp_wait_handler_t        os_rsp_wait_handler,
                               ser_sd_transport_rsp_set_handler_t         os_rsp_set_handler,
                               ser_sd_transport_rx_notification_handler_t rx_notify_handler)
{
    m_os_rsp_wait_handler = os_rsp_wait_handler;
    m_os_rsp_set_handler  = os_rsp_set_handler;
    m_rx_notify_handler   = rx_notify_handler;
    m_ot_rsp_wait_handler = NULL;
    m_evt_handler         = evt_handler;

    if (evt_handler == NULL)
    {
        return NRF_ERROR_INVALID_PARAM;
    }

    return ser_hal_transport_open(ser_sd_transport_hal_handler);
}

/* Close the transport and forget all registered handlers. */
uint32_t ser_sd_transport_close(void)
{
    m_evt_handler         = NULL;
    m_os_rsp_wait_handler = NULL;
    m_os_rsp_set_handler  = NULL;
    m_ot_rsp_wait_handler = NULL;

    ser_hal_transport_close();

    return NRF_SUCCESS;
}

/* Register a 'one time' wait handler; it is consumed (reset to NULL) by
 * the next cmd_write() call. */
uint32_t ser_sd_transport_ot_rsp_wait_handler_set(ser_sd_transport_rsp_wait_handler_t handler)
{
    m_ot_rsp_wait_handler = handler;
    return NRF_SUCCESS;
}

/* Busy while a command is awaiting its response. */
bool ser_sd_transport_is_busy(void)
{
    return m_rsp_wait;
}

/* Allocate a TX packet buffer; refused while a response is pending. */
uint32_t ser_sd_transport_tx_alloc(uint8_t * * pp_data, uint16_t * p_len)
{
    uint32_t err_code;

    if (m_rsp_wait)
    {
        err_code = NRF_ERROR_BUSY;
    }
    else
    {
        err_code = ser_hal_transport_tx_pkt_alloc(pp_data, p_len);
    }
    return err_code;
}

/* Return a TX packet buffer to the hal transport. */
uint32_t ser_sd_transport_tx_free(uint8_t * p_data)
{
    return ser_hal_transport_tx_pkt_free(p_data);
}

/* Free an RX buffer.  @a p_data points past the packet-type byte that the
 * rx handler stripped, so rewind before handing it back. */
uint32_t ser_sd_transport_rx_free(uint8_t * p_data)
{
    p_data -= SER_PKT_TYPE_SIZE;
    return ser_hal_transport_rx_pkt_free(p_data);
}

/* Send a serialized SoftDevice command and (when a decode callback is
 * given) block via the os wait handler until the response has been
 * decoded; returns the decoded SoftDevice return value. */
uint32_t ser_sd_transport_cmd_write(const uint8_t *                p_buffer,
                                    uint16_t                       length,
                                    ser_sd_transport_rsp_handler_t cmd_rsp_decode_callback)
{
    uint32_t err_code = NRF_SUCCESS;

    /* Mark busy before sending so the response IRQ cannot race us. */
    m_rsp_wait        = true;
    m_rsp_dec_handler = cmd_rsp_decode_callback;
    err_code          = ser_hal_transport_tx_pkt_send(p_buffer, length);
    APP_ERROR_CHECK(err_code);

    /* Execute callback for response decoding only if one was provided.*/
    if ((err_code == NRF_SUCCESS) && cmd_rsp_decode_callback)
    {
        /* One-shot wait handler runs once and is then discarded. */
        if (m_ot_rsp_wait_handler)
        {
            m_ot_rsp_wait_handler();
            m_ot_rsp_wait_handler = NULL;
        }

        /* Blocks until the rx/PHY-error path clears m_rsp_wait. */
        m_os_rsp_wait_handler();
        err_code = m_return_value;
    }
    else
    {
        m_rsp_wait = false;
    }

    NRF_LOG_DEBUG("[SD_CALL]:%s, err_code= 0x%X\r\n",
                  (uint32_t)ser_dbg_sd_call_str_get(p_buffer[1]), err_code);
    return err_code;
}
381747.c
/*
 * QEMU TCG support -- s390x vector support instructions
 *
 * Copyright (C) 2019 Red Hat Inc
 *
 * Authors:
 *   David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "vec.h"
#include "tcg/tcg.h"
#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"

/*
 * VECTOR LOAD WITH LENGTH: load min(@bytes, 16) bytes from @addr into
 * vector register @v1; any remaining bytes of @v1 are set to zero.
 */
void HELPER(vll)(CPUS390XState *env, void *v1, uint64_t addr, uint64_t bytes)
{
    if (likely(bytes >= 16)) {
        /* Full 16-byte load, performed as two 8-byte accesses. */
        uint64_t t0, t1;

        t0 = cpu_ldq_data_ra(env, addr, GETPC());
        addr = wrap_address(env, addr + 8);
        t1 = cpu_ldq_data_ra(env, addr, GETPC());
        s390_vec_write_element64(v1, 0, t0);
        s390_vec_write_element64(v1, 1, t1);
    } else {
        /* Partial load: build the result in a zeroed temporary so @v1 is
         * only updated once all byte loads have succeeded (a fault leaves
         * @v1 unmodified), and the tail stays zero. */
        S390Vector tmp = {};
        int i;

        for (i = 0; i < bytes; i++) {
            uint8_t byte = cpu_ldub_data_ra(env, addr, GETPC());

            s390_vec_write_element8(&tmp, i, byte);
            addr = wrap_address(env, addr + 1);
        }
        *(S390Vector *)v1 = tmp;
    }
}

/*
 * Common pack skeleton: the BITS-sized elements of @v2 and @v3 (in that
 * order) are narrowed to TBITS == BITS/2 via the per-element function @fn
 * and written to @v1.  Returns the number of elements @fn reported as
 * saturated.  @v1 may alias the sources: results go through a temporary.
 */
#define DEF_VPK_HFN(BITS, TBITS)                                               \
typedef uint##TBITS##_t (*vpk##BITS##_fn)(uint##BITS##_t, int *);              \
static int vpk##BITS##_hfn(S390Vector *v1, const S390Vector *v2,               \
                           const S390Vector *v3, vpk##BITS##_fn fn)            \
{                                                                              \
    int i, saturated = 0;                                                      \
    S390Vector tmp;                                                            \
                                                                               \
    for (i = 0; i < (128 / TBITS); i++) {                                      \
        uint##BITS##_t src;                                                    \
                                                                               \
        if (i < (128 / BITS)) {                                                \
            src = s390_vec_read_element##BITS(v2, i);                          \
        } else {                                                               \
            src = s390_vec_read_element##BITS(v3, i - (128 / BITS));           \
        }                                                                      \
        s390_vec_write_element##TBITS(&tmp, i, fn(src, &saturated));           \
    }                                                                          \
    *v1 = tmp;                                                                 \
    return saturated;                                                          \
}
DEF_VPK_HFN(64, 32)
DEF_VPK_HFN(32, 16)
DEF_VPK_HFN(16, 8)

/* VECTOR PACK: plain truncation of each element, no saturation tracking. */
#define DEF_VPK(BITS, TBITS)                                                   \
static uint##TBITS##_t vpk##BITS##e(uint##BITS##_t src, int *saturated)        \
{                                                                              \
    return src;                                                                \
}                                                                              \
void HELPER(gvec_vpk##BITS)(void *v1, const void *v2, const void *v3,          \
                            uint32_t desc)                                     \
{                                                                              \
    vpk##BITS##_hfn(v1, v2, v3, vpk##BITS##e);                                 \
}
DEF_VPK(64, 32)
DEF_VPK(32, 16)
DEF_VPK(16, 8)

/*
 * VECTOR PACK SATURATE (signed): clamp each element to the signed range
 * of the target size, counting how many elements saturated.  The _cc
 * variant sets cc 0 (none saturated), 1 (some), or 3 (all).
 */
#define DEF_VPKS(BITS, TBITS)                                                  \
static uint##TBITS##_t vpks##BITS##e(uint##BITS##_t src, int *saturated)       \
{                                                                              \
    if ((int##BITS##_t)src > INT##TBITS##_MAX) {                               \
        (*saturated)++;                                                        \
        return INT##TBITS##_MAX;                                               \
    } else if ((int##BITS##_t)src < INT##TBITS##_MIN) {                        \
        (*saturated)++;                                                        \
        return INT##TBITS##_MIN;                                               \
    }                                                                          \
    return src;                                                                \
}                                                                              \
void HELPER(gvec_vpks##BITS)(void *v1, const void *v2, const void *v3,         \
                             uint32_t desc)                                    \
{                                                                              \
    vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e);                                \
}                                                                              \
void HELPER(gvec_vpks_cc##BITS)(void *v1, const void *v2, const void *v3,      \
                                CPUS390XState *env, uint32_t desc)             \
{                                                                              \
    int saturated = vpk##BITS##_hfn(v1, v2, v3, vpks##BITS##e);                \
                                                                               \
    if (saturated == (128 / TBITS)) {                                          \
        env->cc_op = 3;                                                        \
    } else if (saturated) {                                                    \
        env->cc_op = 1;                                                        \
    } else {                                                                   \
        env->cc_op = 0;                                                        \
    }                                                                          \
}
DEF_VPKS(64, 32)
DEF_VPKS(32, 16)
DEF_VPKS(16, 8)

/*
 * VECTOR PACK LOGICAL SATURATE (unsigned): clamp each element to the
 * unsigned range of the target size; cc handling as for DEF_VPKS.
 */
#define DEF_VPKLS(BITS, TBITS)                                                 \
static uint##TBITS##_t vpkls##BITS##e(uint##BITS##_t src, int *saturated)      \
{                                                                              \
    if (src > UINT##TBITS##_MAX) {                                             \
        (*saturated)++;                                                        \
        return UINT##TBITS##_MAX;                                              \
    }                                                                          \
    return src;                                                                \
}                                                                              \
void HELPER(gvec_vpkls##BITS)(void *v1, const void *v2, const void *v3,        \
                              uint32_t desc)                                   \
{                                                                              \
    vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e);                               \
}                                                                              \
void HELPER(gvec_vpkls_cc##BITS)(void *v1, const void *v2, const void *v3,     \
                                 CPUS390XState *env, uint32_t desc)            \
{                                                                              \
    int saturated = vpk##BITS##_hfn(v1, v2, v3, vpkls##BITS##e);               \
                                                                               \
    if (saturated == (128 / TBITS)) {                                          \
        env->cc_op = 3;                                                        \
    } else if (saturated) {                                                    \
        env->cc_op = 1;                                                        \
    } else {                                                                   \
        env->cc_op = 0;                                                        \
    }                                                                          \
}
DEF_VPKLS(64, 32)
DEF_VPKLS(32, 16)
DEF_VPKLS(16, 8)

/*
 * VECTOR PERMUTE: the low 5 bits of each byte of the selector @v4 pick
 * one byte out of the 32-byte concatenation of @v2 (indices 0..15) and
 * @v3 (indices 16..31).
 */
void HELPER(gvec_vperm)(void *v1, const void *v2, const void *v3,
                        const void *v4, uint32_t desc)
{
    S390Vector tmp;
    int i;

    for (i = 0; i < 16; i++) {
        const uint8_t selector = s390_vec_read_element8(v4, i) & 0x1f;
        uint8_t byte;

        if (selector < 16) {
            byte = s390_vec_read_element8(v2, selector);
        } else {
            byte = s390_vec_read_element8(v3, selector - 16);
        }
        s390_vec_write_element8(&tmp, i, byte);
    }
    /* Built in a temporary so @v1 may alias any of the source vectors. */
    *(S390Vector *)v1 = tmp;
}
/*
 * VECTOR STORE WITH LENGTH: store min(@bytes, 16) bytes of vector
 * register @v1 to @addr.  @v1 is the (const) source operand and must not
 * be modified by this helper.
 */
void HELPER(vstl)(CPUS390XState *env, const void *v1, uint64_t addr,
                  uint64_t bytes)
{
    /* Probe write access before actually modifying memory */
    probe_write_access(env, addr, bytes, GETPC());

    if (likely(bytes >= 16)) {
        /* Full 16-byte store, performed as two 8-byte accesses. */
        cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 0), GETPC());
        addr = wrap_address(env, addr + 8);
        cpu_stq_data_ra(env, addr, s390_vec_read_element64(v1, 1), GETPC());
    } else {
        /* Partial store: emit the leftmost @bytes bytes one at a time.
         *
         * Bug fix: the previous code declared "S390Vector tmp = {};" and
         * ended this branch with "*(S390Vector *)v1 = tmp;" -- a leftover
         * from the vll (load) helper.  That cast away const and clobbered
         * the guest's source vector register with zeros after every short
         * store.  A store must leave @v1 untouched. */
        int i;

        for (i = 0; i < bytes; i++) {
            uint8_t byte = s390_vec_read_element8(v1, i);

            cpu_stb_data_ra(env, addr, byte, GETPC());
            addr = wrap_address(env, addr + 1);
        }
    }
}
309843.c
/* * Copyright (c) 2013-2021, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #if defined(FEATURE_ELF) # include "load_elf.h" #endif /* defined(FEATURE_ELF) */ #include "pt_cpu.h" #include "pt_version.h" #include "intel-pt.h" #if defined(FEATURE_SIDEBAND) # include "libipt-sb.h" #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <inttypes.h> #include <errno.h> #include <xed-interface.h> /* The type of decoder to be used. */ enum ptxed_decoder_type { pdt_insn_decoder, pdt_block_decoder }; /* The decoder to use. 
*/ struct ptxed_decoder { /* The decoder type. */ enum ptxed_decoder_type type; /* The actual decoder. */ union { /* If @type == pdt_insn_decoder */ struct pt_insn_decoder *insn; /* If @type == pdt_block_decoder */ struct pt_block_decoder *block; } variant; /* Decoder-specific configuration. * * We use a set of structs to store the configuration for multiple * decoders. * * - block decoder. */ struct { /* A collection of decoder-specific flags. */ struct pt_conf_flags flags; } block; /* - instruction flow decoder. */ struct { /* A collection of decoder-specific flags. */ struct pt_conf_flags flags; } insn; /* The image section cache. */ struct pt_image_section_cache *iscache; #if defined(FEATURE_SIDEBAND) /* The sideband session. */ struct pt_sb_session *session; #if defined(FEATURE_PEVENT) /* The perf event sideband decoder configuration. */ struct pt_sb_pevent_config pevent; #endif /* defined(FEATURE_PEVENT) */ #endif /* defined(FEATURE_SIDEBAND) */ }; /* A collection of options. */ struct ptxed_options { #if defined(FEATURE_SIDEBAND) /* Sideband dump flags. */ uint32_t sb_dump_flags; #endif /* Do not print the instruction. */ uint32_t dont_print_insn:1; /* Remain as quiet as possible - excluding error messages. */ uint32_t quiet:1; /* Print statistics (overrides quiet). */ uint32_t print_stats:1; /* Print information about section loads and unloads. */ uint32_t track_image:1; /* Track blocks in the output. * * This only applies to the block decoder. */ uint32_t track_blocks:1; /* Print in AT&T format. */ uint32_t att_format:1; /* Print the offset into the trace file. */ uint32_t print_offset:1; /* Print the current timestamp. */ uint32_t print_time:1; /* Print the raw bytes for an insn. */ uint32_t print_raw_insn:1; /* Perform checks. */ uint32_t check:1; /* Print the time stamp of events. */ uint32_t print_event_time:1; /* Print the ip of events. */ uint32_t print_event_ip:1; #if defined(FEATURE_SIDEBAND) /* Print sideband warnings. 
*/ uint32_t print_sb_warnings:1; #endif }; /* A collection of flags selecting which stats to collect/print. */ enum ptxed_stats_flag { /* Collect number of instructions. */ ptxed_stat_insn = (1 << 0), /* Collect number of blocks. */ ptxed_stat_blocks = (1 << 1) }; /* A collection of statistics. */ struct ptxed_stats { /* The number of instructions. */ uint64_t insn; /* The number of blocks. * * This only applies to the block decoder. */ uint64_t blocks; /* A collection of flags saying which statistics to collect/print. */ uint32_t flags; }; static int ptxed_have_decoder(const struct ptxed_decoder *decoder) { /* It suffices to check for one decoder in the variant union. */ return decoder && decoder->variant.insn; } static int ptxed_init_decoder(struct ptxed_decoder *decoder) { if (!decoder) return -pte_internal; memset(decoder, 0, sizeof(*decoder)); decoder->type = pdt_block_decoder; decoder->iscache = pt_iscache_alloc(NULL); if (!decoder->iscache) return -pte_nomem; #if defined(FEATURE_SIDEBAND) decoder->session = pt_sb_alloc(decoder->iscache); if (!decoder->session) { pt_iscache_free(decoder->iscache); return -pte_nomem; } #if defined(FEATURE_PEVENT) memset(&decoder->pevent, 0, sizeof(decoder->pevent)); decoder->pevent.size = sizeof(decoder->pevent); decoder->pevent.kernel_start = UINT64_MAX; decoder->pevent.time_mult = 1; #endif /* defined(FEATURE_PEVENT) */ #endif /* defined(FEATURE_SIDEBAND) */ return 0; } static void ptxed_free_decoder(struct ptxed_decoder *decoder) { if (!decoder) return; switch (decoder->type) { case pdt_insn_decoder: pt_insn_free_decoder(decoder->variant.insn); break; case pdt_block_decoder: pt_blk_free_decoder(decoder->variant.block); break; } #if defined(FEATURE_SIDEBAND) pt_sb_free(decoder->session); #endif pt_iscache_free(decoder->iscache); } static void help(const char *name) { printf("usage: %s [<options>]\n\n", name); printf("options:\n"); printf(" --help|-h this text.\n"); printf(" --version display version information and 
exit.\n"); printf(" --att print instructions in att format.\n"); printf(" --no-inst do not print instructions (only addresses).\n"); printf(" --quiet|-q do not print anything (except errors).\n"); printf(" --offset print the offset into the trace file.\n"); printf(" --time print the current timestamp.\n"); printf(" --raw-insn print the raw bytes of each instruction.\n"); printf(" --check perform checks (expensive).\n"); printf(" --iscache-limit <size> set the image section cache limit to <size> bytes.\n"); printf(" --event:time print the tsc for events if available.\n"); printf(" --event:ip print the ip of events if available.\n"); printf(" --event:tick request tick events.\n"); printf(" --filter:addr<n>_cfg <cfg> set IA32_RTIT_CTL.ADDRn_CFG to <cfg>.\n"); printf(" --filter:addr<n>_a <base> set IA32_RTIT_ADDRn_A to <base>.\n"); printf(" --filter:addr<n>_b <limit> set IA32_RTIT_ADDRn_B to <limit>.\n"); printf(" --stat print statistics (even when quiet).\n"); printf(" collects all statistics unless one or more are selected.\n"); printf(" --stat:insn collect number of instructions.\n"); printf(" --stat:blocks collect number of blocks.\n"); #if defined(FEATURE_SIDEBAND) printf(" --sb:compact | --sb show sideband records in compact format.\n"); printf(" --sb:verbose show sideband records in verbose format.\n"); printf(" --sb:filename show the filename on sideband records.\n"); printf(" --sb:offset show the offset on sideband records.\n"); printf(" --sb:time show the time on sideband records.\n"); printf(" --sb:switch print the new image name on context switches.\n"); printf(" --sb:warn show sideband warnings.\n"); #if defined(FEATURE_PEVENT) printf(" --pevent:primary/secondary <file>[:<from>[-<to>]]\n"); printf(" load a perf_event sideband stream from <file>.\n"); printf(" an optional offset or range can be given.\n"); printf(" --pevent:sample-type <val> set perf_event_attr.sample_type to <val> (default: 0).\n"); printf(" --pevent:time-zero <val> set 
perf_event_mmap_page.time_zero to <val> (default: 0).\n"); printf(" --pevent:time-shift <val> set perf_event_mmap_page.time_shift to <val> (default: 0).\n"); printf(" --pevent:time-mult <val> set perf_event_mmap_page.time_mult to <val> (default: 1).\n"); printf(" --pevent:tsc-offset <val> show perf events <val> ticks earlier.\n"); printf(" --pevent:kernel-start <val> the start address of the kernel.\n"); printf(" --pevent:sysroot <path> prepend <path> to sideband filenames.\n"); #if defined(FEATURE_ELF) printf(" --pevent:kcore <file> load the kernel from a core dump.\n"); #endif /* defined(FEATURE_ELF) */ printf(" --pevent:vdso-x64 <file> use <file> as 64-bit vdso.\n"); printf(" --pevent:vdso-x32 <file> use <file> as x32 vdso.\n"); printf(" --pevent:vdso-ia32 <file> use <file> as 32-bit vdso.\n"); #endif /* defined(FEATURE_PEVENT) */ #endif /* defined(FEATURE_SIDEBAND) */ printf(" --verbose|-v print various information (even when quiet).\n"); printf(" --pt <file>[:<from>[-<to>]] load the processor trace data from <file>.\n"); printf(" an optional offset or range can be given.\n"); #if defined(FEATURE_ELF) printf(" --elf <<file>[:<base>] load an ELF from <file> at address <base>.\n"); printf(" use the default load address if <base> is omitted.\n"); #endif /* defined(FEATURE_ELF) */ printf(" --raw <file>[:<from>[-<to>]]:<base> load a raw binary from <file> at address <base>.\n"); printf(" an optional offset or range can be given.\n"); printf(" --cpu none|f/m[/s] set cpu to the given value and decode according to:\n"); printf(" none spec (default)\n"); printf(" f/m[/s] family/model[/stepping]\n"); printf(" --mtc-freq <n> set the MTC frequency (IA32_RTIT_CTL[17:14]) to <n>.\n"); printf(" --nom-freq <n> set the nominal frequency (MSR_PLATFORM_INFO[15:8]) to <n>.\n"); printf(" --cpuid-0x15.eax set the value of cpuid[0x15].eax.\n"); printf(" --cpuid-0x15.ebx set the value of cpuid[0x15].ebx.\n"); printf(" --insn-decoder use the instruction flow decoder.\n"); printf(" 
--insn:keep-tcal-on-ovf preserve timing calibration on overflow.\n"); printf(" --block-decoder use the block decoder (default).\n"); printf(" --block:show-blocks show blocks in the output.\n"); printf(" --block:end-on-call set the end-on-call block decoder flag.\n"); printf(" --block:end-on-jump set the end-on-jump block decoder flag.\n"); printf(" --block:keep-tcal-on-ovf preserve timing calibration on overflow.\n"); printf("\n"); #if defined(FEATURE_ELF) printf("You must specify at least one binary or ELF file (--raw|--elf).\n"); #else /* defined(FEATURE_ELF) */ printf("You must specify at least one binary file (--raw).\n"); #endif /* defined(FEATURE_ELF) */ printf("You must specify exactly one processor trace file (--pt).\n"); } static int extract_base(char *arg, uint64_t *base) { char *sep, *rest; sep = strrchr(arg, ':'); if (sep) { uint64_t num; if (!sep[1]) return 0; errno = 0; num = strtoull(sep+1, &rest, 0); if (errno || *rest) return 0; *base = num; *sep = 0; return 1; } return 0; } static int parse_range(const char *arg, uint64_t *begin, uint64_t *end) { char *rest; if (!arg || !*arg) return 0; errno = 0; *begin = strtoull(arg, &rest, 0); if (errno) return -1; if (!*rest) return 1; if (*rest != '-') return -1; *end = strtoull(rest+1, &rest, 0); if (errno || *rest) return -1; return 2; } /* Preprocess a filename argument. * * A filename may optionally be followed by a file offset or a file range * argument separated by ':'. Split the original argument into the filename * part and the offset/range part. * * If no end address is specified, set @size to zero. * If no offset is specified, set @offset to zero. * * Returns zero on success, a negative error code otherwise. */ static int preprocess_filename(char *filename, uint64_t *offset, uint64_t *size) { uint64_t begin, end; char *range; int parts; if (!filename || !offset || !size) return -pte_internal; /* Search from the end as the filename may also contain ':'. 
*/ range = strrchr(filename, ':'); if (!range) { *offset = 0ull; *size = 0ull; return 0; } /* Let's try to parse an optional range suffix. * * If we can, remove it from the filename argument. * If we can not, assume that the ':' is part of the filename, e.g. a * drive letter on Windows. */ parts = parse_range(range + 1, &begin, &end); if (parts <= 0) { *offset = 0ull; *size = 0ull; return 0; } if (parts == 1) { *offset = begin; *size = 0ull; *range = 0; return 0; } if (parts == 2) { if (end <= begin) return -pte_invalid; *offset = begin; *size = end - begin; *range = 0; return 0; } return -pte_internal; } static int load_file(uint8_t **buffer, size_t *psize, const char *filename, uint64_t offset, uint64_t size, const char *prog) { uint8_t *content; size_t read; FILE *file; long fsize, begin, end; int errcode; if (!buffer || !psize || !filename || !prog) { fprintf(stderr, "%s: internal error.\n", prog ? prog : ""); return -1; } errno = 0; file = fopen(filename, "rb"); if (!file) { fprintf(stderr, "%s: failed to open %s: %d.\n", prog, filename, errno); return -1; } errcode = fseek(file, 0, SEEK_END); if (errcode) { fprintf(stderr, "%s: failed to determine size of %s: %d.\n", prog, filename, errno); goto err_file; } fsize = ftell(file); if (fsize < 0) { fprintf(stderr, "%s: failed to determine size of %s: %d.\n", prog, filename, errno); goto err_file; } begin = (long) offset; if (((uint64_t) begin != offset) || (fsize <= begin)) { fprintf(stderr, "%s: bad offset 0x%" PRIx64 " into %s.\n", prog, offset, filename); goto err_file; } end = fsize; if (size) { uint64_t range_end; range_end = offset + size; if ((uint64_t) end < range_end) { fprintf(stderr, "%s: bad range 0x%" PRIx64 " in %s.\n", prog, range_end, filename); goto err_file; } end = (long) range_end; } fsize = end - begin; content = malloc((size_t) fsize); if (!content) { fprintf(stderr, "%s: failed to allocated memory %s.\n", prog, filename); goto err_file; } errcode = fseek(file, begin, SEEK_SET); if (errcode) 
{ fprintf(stderr, "%s: failed to load %s: %d.\n", prog, filename, errno); goto err_content; } read = fread(content, (size_t) fsize, 1u, file); if (read != 1) { fprintf(stderr, "%s: failed to load %s: %d.\n", prog, filename, errno); goto err_content; } fclose(file); *buffer = content; *psize = (size_t) fsize; return 0; err_content: free(content); err_file: fclose(file); return -1; } static int load_pt(struct pt_config *config, char *arg, const char *prog) { uint64_t foffset, fsize; uint8_t *buffer; size_t size; int errcode; errcode = preprocess_filename(arg, &foffset, &fsize); if (errcode < 0) { fprintf(stderr, "%s: bad file %s: %s.\n", prog, arg, pt_errstr(pt_errcode(errcode))); return -1; } errcode = load_file(&buffer, &size, arg, foffset, fsize, prog); if (errcode < 0) return errcode; config->begin = buffer; config->end = buffer + size; return 0; } static int load_raw(struct pt_image_section_cache *iscache, struct pt_image *image, char *arg, const char *prog) { uint64_t base, foffset, fsize; int isid, errcode, has_base; has_base = extract_base(arg, &base); if (has_base <= 0) { fprintf(stderr, "%s: failed to parse base address" "from '%s'.\n", prog, arg); return -1; } errcode = preprocess_filename(arg, &foffset, &fsize); if (errcode < 0) { fprintf(stderr, "%s: bad file %s: %s.\n", prog, arg, pt_errstr(pt_errcode(errcode))); return -1; } if (!fsize) fsize = UINT64_MAX; isid = pt_iscache_add_file(iscache, arg, foffset, fsize, base); if (isid < 0) { fprintf(stderr, "%s: failed to add %s at 0x%" PRIx64 ": %s.\n", prog, arg, base, pt_errstr(pt_errcode(isid))); return -1; } errcode = pt_image_add_cached(image, iscache, isid, NULL); if (errcode < 0) { fprintf(stderr, "%s: failed to add %s at 0x%" PRIx64 ": %s.\n", prog, arg, base, pt_errstr(pt_errcode(errcode))); return -1; } return 0; } static xed_machine_mode_enum_t translate_mode(enum pt_exec_mode mode) { switch (mode) { case ptem_unknown: return XED_MACHINE_MODE_INVALID; case ptem_16bit: return 
XED_MACHINE_MODE_LEGACY_16; case ptem_32bit: return XED_MACHINE_MODE_LEGACY_32; case ptem_64bit: return XED_MACHINE_MODE_LONG_64; } return XED_MACHINE_MODE_INVALID; } static const char *visualize_iclass(enum pt_insn_class iclass) { switch (iclass) { case ptic_unknown: return "unknown"; case ptic_other: return "other"; case ptic_call: return "near call"; case ptic_return: return "near return"; case ptic_jump: return "near jump"; case ptic_cond_jump: return "cond jump"; case ptic_far_call: return "far call"; case ptic_far_return: return "far return"; case ptic_far_jump: return "far jump"; case ptic_ptwrite: return "ptwrite"; case ptic_indirect: return "indirect"; } return "undefined"; } static void check_insn_iclass(const xed_inst_t *inst, const struct pt_insn *insn, uint64_t offset) { xed_category_enum_t category; xed_iclass_enum_t iclass; if (!inst || !insn) { printf("[internal error]\n"); return; } category = xed_inst_category(inst); iclass = xed_inst_iclass(inst); switch (insn->iclass) { case ptic_unknown: break; case ptic_ptwrite: case ptic_other: switch (category) { default: return; case XED_CATEGORY_CALL: case XED_CATEGORY_RET: case XED_CATEGORY_UNCOND_BR: case XED_CATEGORY_SYSCALL: case XED_CATEGORY_SYSRET: break; case XED_CATEGORY_COND_BR: switch (iclass) { case XED_ICLASS_XBEGIN: case XED_ICLASS_XEND: return; default: break; } break; case XED_CATEGORY_INTERRUPT: switch (iclass) { case XED_ICLASS_BOUND: return; default: break; } break; } break; case ptic_call: if (iclass == XED_ICLASS_CALL_NEAR) return; break; case ptic_return: if (iclass == XED_ICLASS_RET_NEAR) return; break; case ptic_jump: if (iclass == XED_ICLASS_JMP) return; break; case ptic_cond_jump: if (category == XED_CATEGORY_COND_BR) return; break; case ptic_far_call: switch (iclass) { default: break; case XED_ICLASS_CALL_FAR: case XED_ICLASS_INT: case XED_ICLASS_INT1: case XED_ICLASS_INT3: case XED_ICLASS_INTO: case XED_ICLASS_SYSCALL: case XED_ICLASS_SYSCALL_AMD: case XED_ICLASS_SYSENTER: case 
XED_ICLASS_VMCALL: return; } break; case ptic_far_return: switch (iclass) { default: break; case XED_ICLASS_RET_FAR: case XED_ICLASS_IRET: case XED_ICLASS_IRETD: case XED_ICLASS_IRETQ: case XED_ICLASS_SYSRET: case XED_ICLASS_SYSRET_AMD: case XED_ICLASS_SYSEXIT: case XED_ICLASS_VMLAUNCH: case XED_ICLASS_VMRESUME: return; } break; case ptic_far_jump: if (iclass == XED_ICLASS_JMP_FAR) return; break; case ptic_indirect: switch (iclass) { default: break; case XED_ICLASS_CALL_FAR: case XED_ICLASS_INT: case XED_ICLASS_INT1: case XED_ICLASS_INT3: case XED_ICLASS_INTO: case XED_ICLASS_SYSCALL: case XED_ICLASS_SYSCALL_AMD: case XED_ICLASS_SYSENTER: case XED_ICLASS_VMCALL: case XED_ICLASS_RET_FAR: case XED_ICLASS_IRET: case XED_ICLASS_IRETD: case XED_ICLASS_IRETQ: case XED_ICLASS_SYSRET: case XED_ICLASS_SYSRET_AMD: case XED_ICLASS_SYSEXIT: case XED_ICLASS_VMLAUNCH: case XED_ICLASS_VMRESUME: case XED_ICLASS_JMP_FAR: case XED_ICLASS_JMP: return; } break; } /* If we get here, @insn->iclass doesn't match XED's classification. */ printf("[%" PRIx64 ", %" PRIx64 ": iclass error: iclass: %s, " "xed iclass: %s, category: %s]\n", offset, insn->ip, visualize_iclass(insn->iclass), xed_iclass_enum_t2str(iclass), xed_category_enum_t2str(category)); } static void check_insn_decode(xed_decoded_inst_t *inst, const struct pt_insn *insn, uint64_t offset) { xed_error_enum_t errcode; if (!inst || !insn) { printf("[internal error]\n"); return; } xed_decoded_inst_set_mode(inst, translate_mode(insn->mode), XED_ADDRESS_WIDTH_INVALID); /* Decode the instruction (again). * * We may have decoded the instruction already for printing. In this * case, we will decode it twice. * * The more common use-case, however, is to check the instruction class * while not printing instructions since the latter is too expensive for * regular use with long traces. 
*/ errcode = xed_decode(inst, insn->raw, insn->size); if (errcode != XED_ERROR_NONE) { printf("[%" PRIx64 ", %" PRIx64 ": xed error: (%u) %s]\n", offset, insn->ip, errcode, xed_error_enum_t2str(errcode)); return; } if (!xed_decoded_inst_valid(inst)) { printf("[%" PRIx64 ", %" PRIx64 ": xed error: " "invalid instruction]\n", offset, insn->ip); return; } } static void check_insn(const struct pt_insn *insn, uint64_t offset) { xed_decoded_inst_t inst; if (!insn) { printf("[internal error]\n"); return; } if (insn->isid <= 0) printf("[%" PRIx64 ", %" PRIx64 ": check error: " "bad isid]\n", offset, insn->ip); xed_decoded_inst_zero(&inst); check_insn_decode(&inst, insn, offset); /* We need a valid instruction in order to do further checks. * * Invalid instructions have already been diagnosed. */ if (!xed_decoded_inst_valid(&inst)) return; check_insn_iclass(xed_decoded_inst_inst(&inst), insn, offset); } static void print_raw_insn(const struct pt_insn *insn) { uint8_t length, idx; if (!insn) { printf("[internal error]"); return; } length = insn->size; if (sizeof(insn->raw) < length) length = sizeof(insn->raw); for (idx = 0; idx < length; ++idx) printf(" %02x", insn->raw[idx]); for (; idx < pt_max_insn_size; ++idx) printf(" "); } static void xed_print_insn(const xed_decoded_inst_t *inst, uint64_t ip, const struct ptxed_options *options) { xed_print_info_t pi; char buffer[256]; xed_bool_t ok; if (!inst || !options) { printf(" [internal error]"); return; } if (options->print_raw_insn) { xed_uint_t length, i; length = xed_decoded_inst_get_length(inst); for (i = 0; i < length; ++i) printf(" %02x", xed_decoded_inst_get_byte(inst, i)); for (; i < pt_max_insn_size; ++i) printf(" "); } xed_init_print_info(&pi); pi.p = inst; pi.buf = buffer; pi.blen = sizeof(buffer); pi.runtime_address = ip; if (options->att_format) pi.syntax = XED_SYNTAX_ATT; ok = xed_format_generic(&pi); if (!ok) { printf(" [xed print error]"); return; } printf(" %s", buffer); } static void print_insn(const struct 
pt_insn *insn, xed_state_t *xed, const struct ptxed_options *options, uint64_t offset, uint64_t time) { if (!insn || !options) { printf("[internal error]\n"); return; } if (options->print_offset) printf("%016" PRIx64 " ", offset); if (options->print_time) printf("%016" PRIx64 " ", time); if (insn->speculative) printf("? "); printf("%016" PRIx64, insn->ip); if (!options->dont_print_insn) { xed_machine_mode_enum_t mode; xed_decoded_inst_t inst; xed_error_enum_t errcode; mode = translate_mode(insn->mode); xed_state_set_machine_mode(xed, mode); xed_decoded_inst_zero_set_mode(&inst, xed); errcode = xed_decode(&inst, insn->raw, insn->size); switch (errcode) { case XED_ERROR_NONE: xed_print_insn(&inst, insn->ip, options); break; default: print_raw_insn(insn); printf(" [xed decode error: (%u) %s]", errcode, xed_error_enum_t2str(errcode)); break; } } printf("\n"); } static const char *print_exec_mode(enum pt_exec_mode mode) { switch (mode) { case ptem_unknown: return "<unknown>"; case ptem_16bit: return "16-bit"; case ptem_32bit: return "32-bit"; case ptem_64bit: return "64-bit"; } return "<invalid>"; } static void print_event(const struct pt_event *event, const struct ptxed_options *options, uint64_t offset) { if (!event || !options) { printf("[internal error]\n"); return; } printf("["); if (options->print_offset) printf("%016" PRIx64 " ", offset); if (options->print_event_time && event->has_tsc) printf("%016" PRIx64 " ", event->tsc); switch (event->type) { case ptev_enabled: printf("%s", event->variant.enabled.resumed ? 
"resumed" : "enabled"); if (options->print_event_ip) printf(", ip: %016" PRIx64, event->variant.enabled.ip); break; case ptev_disabled: printf("disabled"); if (options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.disabled.ip); break; case ptev_async_disabled: printf("disabled"); if (options->print_event_ip) { printf(", at: %016" PRIx64, event->variant.async_disabled.at); if (!event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.async_disabled.ip); } break; case ptev_async_branch: printf("interrupt"); if (options->print_event_ip) { printf(", from: %016" PRIx64, event->variant.async_branch.from); if (!event->ip_suppressed) printf(", to: %016" PRIx64, event->variant.async_branch.to); } break; case ptev_paging: printf("paging, cr3: %016" PRIx64 "%s", event->variant.paging.cr3, event->variant.paging.non_root ? ", nr" : ""); break; case ptev_async_paging: printf("paging, cr3: %016" PRIx64 "%s", event->variant.async_paging.cr3, event->variant.async_paging.non_root ? 
", nr" : ""); if (options->print_event_ip) printf(", ip: %016" PRIx64, event->variant.async_paging.ip); break; case ptev_overflow: printf("overflow"); if (options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.overflow.ip); break; case ptev_exec_mode: printf("exec mode: %s", print_exec_mode(event->variant.exec_mode.mode)); if (options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.exec_mode.ip); break; case ptev_tsx: if (event->variant.tsx.aborted) printf("aborted"); else if (event->variant.tsx.speculative) printf("begin transaction"); else printf("committed"); if (options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.tsx.ip); break; case ptev_stop: printf("stopped"); break; case ptev_vmcs: printf("vmcs, base: %016" PRIx64, event->variant.vmcs.base); break; case ptev_async_vmcs: printf("vmcs, base: %016" PRIx64, event->variant.async_vmcs.base); if (options->print_event_ip) printf(", ip: %016" PRIx64, event->variant.async_vmcs.ip); break; case ptev_exstop: printf("exstop"); if (options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.exstop.ip); break; case ptev_mwait: printf("mwait %" PRIx32 " %" PRIx32, event->variant.mwait.hints, event->variant.mwait.ext); if (options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.mwait.ip); break; case ptev_pwre: printf("pwre c%u.%u", (event->variant.pwre.state + 1) & 0xf, (event->variant.pwre.sub_state + 1) & 0xf); if (event->variant.pwre.hw) printf(" hw"); break; case ptev_pwrx: printf("pwrx "); if (event->variant.pwrx.interrupt) printf("int: "); if (event->variant.pwrx.store) printf("st: "); if (event->variant.pwrx.autonomous) printf("hw: "); printf("c%u (c%u)", (event->variant.pwrx.last + 1) & 0xf, (event->variant.pwrx.deepest + 1) & 0xf); break; case ptev_ptwrite: printf("ptwrite: %" PRIx64, event->variant.ptwrite.payload); if 
(options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.ptwrite.ip); break; case ptev_tick: printf("tick"); if (options->print_event_ip && !event->ip_suppressed) printf(", ip: %016" PRIx64, event->variant.tick.ip); break; case ptev_cbr: printf("cbr: %x", event->variant.cbr.ratio); break; case ptev_mnt: printf("mnt: %" PRIx64, event->variant.mnt.payload); break; case ptev_tip: printf("tip: %" PRIx64, event->variant.tip.ip); break; case ptev_tnt: { uint64_t index; printf("tnt: "); for (index = event->variant.tnt.size; index; index >>= 1) printf("%s", (event->variant.tnt.bits & index) ? "!" : "."); } break; } printf("]\n"); } static void diagnose(struct ptxed_decoder *decoder, uint64_t ip, const char *errtype, int errcode) { int err; uint64_t pos; err = -pte_internal; pos = 0ull; switch (decoder->type) { case pdt_insn_decoder: err = pt_insn_get_offset(decoder->variant.insn, &pos); break; case pdt_block_decoder: err = pt_blk_get_offset(decoder->variant.block, &pos); break; } if (err < 0) { printf("could not determine offset: %s\n", pt_errstr(pt_errcode(err))); printf("[?, %" PRIx64 ": %s: %s]\n", ip, errtype, pt_errstr(pt_errcode(errcode))); } else printf("[%" PRIx64 ", %" PRIx64 ": %s: %s]\n", pos, ip, errtype, pt_errstr(pt_errcode(errcode))); } #if defined(FEATURE_SIDEBAND) static int ptxed_sb_event(struct ptxed_decoder *decoder, const struct pt_event *event, const struct ptxed_options *options) { struct pt_image *image; int errcode; if (!decoder || !event || !options) return -pte_internal; image = NULL; errcode = pt_sb_event(decoder->session, &image, event, sizeof(*event), stdout, options->sb_dump_flags); if (errcode < 0) return errcode; if (!image) return 0; switch (decoder->type) { case pdt_insn_decoder: return pt_insn_set_image(decoder->variant.insn, image); case pdt_block_decoder: return pt_blk_set_image(decoder->variant.block, image); } return -pte_internal; } #endif /* defined(FEATURE_SIDEBAND) */ static int 
drain_events_insn(struct ptxed_decoder *decoder, uint64_t *time, int status, const struct ptxed_options *options) { struct pt_insn_decoder *ptdec; int errcode; if (!decoder || !time || !options) return -pte_internal; ptdec = decoder->variant.insn; while (status & pts_event_pending) { struct pt_event event; uint64_t offset; offset = 0ull; if (options->print_offset) { errcode = pt_insn_get_offset(ptdec, &offset); if (errcode < 0) return errcode; } status = pt_insn_event(ptdec, &event, sizeof(event)); if (status < 0) return status; *time = event.tsc; if (!options->quiet && !event.status_update) print_event(&event, options, offset); #if defined(FEATURE_SIDEBAND) errcode = ptxed_sb_event(decoder, &event, options); if (errcode < 0) return errcode; #endif /* defined(FEATURE_SIDEBAND) */ } return status; } static void decode_insn(struct ptxed_decoder *decoder, const struct ptxed_options *options, struct ptxed_stats *stats) { struct pt_insn_decoder *ptdec; xed_state_t xed; uint64_t offset, sync, time; if (!decoder || !options) { printf("[internal error]\n"); return; } xed_state_zero(&xed); ptdec = decoder->variant.insn; offset = 0ull; sync = 0ull; time = 0ull; for (;;) { struct pt_insn insn; int status; /* Initialize the IP - we use it for error reporting. */ insn.ip = 0ull; status = pt_insn_sync_forward(ptdec); if (status < 0) { uint64_t new_sync; int errcode; if (status == -pte_eos) break; diagnose(decoder, insn.ip, "sync error", status); /* Let's see if we made any progress. If we haven't, * we likely never will. Bail out. * * We intentionally report the error twice to indicate * that we tried to re-sync. Maybe it even changed. 
*/ errcode = pt_insn_get_offset(ptdec, &new_sync); if (errcode < 0 || (new_sync <= sync)) break; sync = new_sync; continue; } for (;;) { status = drain_events_insn(decoder, &time, status, options); if (status < 0) break; if (status & pts_eos) { if (!(status & pts_ip_suppressed) && !options->quiet) printf("[end of trace]\n"); status = -pte_eos; break; } if (options->print_offset || options->check) { int errcode; errcode = pt_insn_get_offset(ptdec, &offset); if (errcode < 0) break; } status = pt_insn_next(ptdec, &insn, sizeof(insn)); if (status < 0) { /* Even in case of errors, we may have succeeded * in decoding the current instruction. */ if (insn.iclass != ptic_unknown) { if (!options->quiet) print_insn(&insn, &xed, options, offset, time); if (stats) stats->insn += 1; if (options->check) check_insn(&insn, offset); } break; } if (!options->quiet) print_insn(&insn, &xed, options, offset, time); if (stats) stats->insn += 1; if (options->check) check_insn(&insn, offset); } /* We shouldn't break out of the loop without an error. */ if (!status) status = -pte_internal; /* We're done when we reach the end of the trace stream. */ if (status == -pte_eos) break; diagnose(decoder, insn.ip, "error", status); } } static int xed_next_ip(uint64_t *pip, const xed_decoded_inst_t *inst, uint64_t ip) { xed_uint_t length, disp_width; if (!pip || !inst) return -pte_internal; length = xed_decoded_inst_get_length(inst); if (!length) { printf("[xed error: failed to determine instruction length]\n"); return -pte_bad_insn; } ip += length; /* If it got a branch displacement it must be a branch. * * This includes conditional branches for which we don't know whether * they were taken. The next IP won't be used in this case as a * conditional branch ends a block. The next block will start with the * correct IP. 
*/ disp_width = xed_decoded_inst_get_branch_displacement_width(inst); if (disp_width) ip += (uint64_t) (int64_t) xed_decoded_inst_get_branch_displacement(inst); *pip = ip; return 0; } static int block_fetch_insn(struct pt_insn *insn, const struct pt_block *block, uint64_t ip, struct pt_image_section_cache *iscache) { if (!insn || !block) return -pte_internal; /* We can't read from an empty block. */ if (!block->ninsn) return -pte_invalid; memset(insn, 0, sizeof(*insn)); insn->mode = block->mode; insn->isid = block->isid; insn->ip = ip; /* The last instruction in a block may be truncated. */ if ((ip == block->end_ip) && block->truncated) { if (!block->size || (sizeof(insn->raw) < (size_t) block->size)) return -pte_bad_insn; insn->size = block->size; memcpy(insn->raw, block->raw, insn->size); } else { int size; size = pt_iscache_read(iscache, insn->raw, sizeof(insn->raw), insn->isid, ip); if (size < 0) return size; insn->size = (uint8_t) size; } return 0; } static void diagnose_block(struct ptxed_decoder *decoder, const char *errtype, int errcode, const struct pt_block *block) { uint64_t ip; int err; if (!decoder || !block) { printf("ptxed: internal error"); return; } /* Determine the IP at which to report the error. * * Depending on the type of error, the IP varies between that of the * last instruction in @block or the next instruction outside of @block. * * When the block is empty, we use the IP of the block itself, * i.e. where the first instruction should have been. */ if (!block->ninsn) ip = block->ip; else { ip = block->end_ip; switch (errcode) { case -pte_nomap: case -pte_bad_insn: { struct pt_insn insn; xed_decoded_inst_t inst; xed_error_enum_t xederr; /* Decode failed when trying to fetch or decode the next * instruction. Since indirect or conditional branches * end a block and don't cause an additional fetch, we * should be able to reach that IP from the last * instruction in @block. * * We ignore errors and fall back to the IP of the last * instruction. 
*/ err = block_fetch_insn(&insn, block, ip, decoder->iscache); if (err < 0) break; xed_decoded_inst_zero(&inst); xed_decoded_inst_set_mode(&inst, translate_mode(insn.mode), XED_ADDRESS_WIDTH_INVALID); xederr = xed_decode(&inst, insn.raw, insn.size); if (xederr != XED_ERROR_NONE) break; (void) xed_next_ip(&ip, &inst, insn.ip); } break; default: break; } } diagnose(decoder, ip, errtype, errcode); } static void print_block(struct ptxed_decoder *decoder, const struct pt_block *block, const struct ptxed_options *options, const struct ptxed_stats *stats, uint64_t offset, uint64_t time) { xed_machine_mode_enum_t mode; xed_state_t xed; uint64_t ip; uint16_t ninsn; if (!block || !options) { printf("[internal error]\n"); return; } if (options->track_blocks) { printf("[block"); if (stats) printf(" %" PRIx64, stats->blocks); printf("]\n"); } mode = translate_mode(block->mode); xed_state_init2(&xed, mode, XED_ADDRESS_WIDTH_INVALID); /* There's nothing to do for empty blocks. */ ninsn = block->ninsn; if (!ninsn) return; ip = block->ip; for (;;) { struct pt_insn insn; xed_decoded_inst_t inst; xed_error_enum_t xederrcode; int errcode; if (options->print_offset) printf("%016" PRIx64 " ", offset); if (options->print_time) printf("%016" PRIx64 " ", time); if (block->speculative) printf("? 
"); printf("%016" PRIx64, ip); errcode = block_fetch_insn(&insn, block, ip, decoder->iscache); if (errcode < 0) { printf(" [fetch error: %s]\n", pt_errstr(pt_errcode(errcode))); break; } xed_decoded_inst_zero_set_mode(&inst, &xed); xederrcode = xed_decode(&inst, insn.raw, insn.size); if (xederrcode != XED_ERROR_NONE) { print_raw_insn(&insn); printf(" [xed decode error: (%u) %s]\n", xederrcode, xed_error_enum_t2str(xederrcode)); break; } if (!options->dont_print_insn) xed_print_insn(&inst, insn.ip, options); printf("\n"); ninsn -= 1; if (!ninsn) break; errcode = xed_next_ip(&ip, &inst, ip); if (errcode < 0) { diagnose(decoder, ip, "reconstruct error", errcode); break; } } /* Decode should have brought us to @block->end_ip. */ if (ip != block->end_ip) diagnose(decoder, ip, "reconstruct error", -pte_nosync); } static void check_block(const struct pt_block *block, struct pt_image_section_cache *iscache, uint64_t offset) { struct pt_insn insn; xed_decoded_inst_t inst; uint64_t ip; uint16_t ninsn; int errcode; if (!block) { printf("[internal error]\n"); return; } /* There's nothing to check for an empty block. */ ninsn = block->ninsn; if (!ninsn) return; if (block->isid <= 0) printf("[%" PRIx64 ", %" PRIx64 ": check error: " "bad isid]\n", offset, block->ip); ip = block->ip; do { errcode = block_fetch_insn(&insn, block, ip, iscache); if (errcode < 0) { printf("[%" PRIx64 ", %" PRIx64 ": fetch error: %s]\n", offset, ip, pt_errstr(pt_errcode(errcode))); return; } xed_decoded_inst_zero(&inst); check_insn_decode(&inst, &insn, offset); /* We need a valid instruction in order to do further checks. * * Invalid instructions have already been diagnosed. */ if (!xed_decoded_inst_valid(&inst)) return; errcode = xed_next_ip(&ip, &inst, ip); if (errcode < 0) { printf("[%" PRIx64 ", %" PRIx64 ": error: %s]\n", offset, ip, pt_errstr(pt_errcode(errcode))); return; } } while (--ninsn); /* We reached the end of the block. Both @insn and @inst refer to the * last instruction in @block. 
* * Check that we reached the end IP of the block. */ if (insn.ip != block->end_ip) { printf("[%" PRIx64 ", %" PRIx64 ": error: did not reach end: %" PRIx64 "]\n", offset, insn.ip, block->end_ip); } /* Check the last instruction's classification, if available. */ insn.iclass = block->iclass; if (insn.iclass) check_insn_iclass(xed_decoded_inst_inst(&inst), &insn, offset); } static int drain_events_block(struct ptxed_decoder *decoder, uint64_t *time, int status, const struct ptxed_options *options) { struct pt_block_decoder *ptdec; int errcode; if (!decoder || !time || !options) return -pte_internal; ptdec = decoder->variant.block; while (status & pts_event_pending) { struct pt_event event; uint64_t offset; offset = 0ull; if (options->print_offset) { errcode = pt_blk_get_offset(ptdec, &offset); if (errcode < 0) return errcode; } status = pt_blk_event(ptdec, &event, sizeof(event)); if (status < 0) return status; *time = event.tsc; if (!options->quiet && !event.status_update) print_event(&event, options, offset); #if defined(FEATURE_SIDEBAND) errcode = ptxed_sb_event(decoder, &event, options); if (errcode < 0) return errcode; #endif /* defined(FEATURE_SIDEBAND) */ } return status; } static void decode_block(struct ptxed_decoder *decoder, const struct ptxed_options *options, struct ptxed_stats *stats) { struct pt_image_section_cache *iscache; struct pt_block_decoder *ptdec; uint64_t offset, sync, time; if (!decoder || !options) { printf("[internal error]\n"); return; } iscache = decoder->iscache; ptdec = decoder->variant.block; offset = 0ull; sync = 0ull; time = 0ull; for (;;) { struct pt_block block; int status; /* Initialize IP and ninsn - we use it for error reporting. */ block.ip = 0ull; block.ninsn = 0u; status = pt_blk_sync_forward(ptdec); if (status < 0) { uint64_t new_sync; int errcode; if (status == -pte_eos) break; diagnose_block(decoder, "sync error", status, &block); /* Let's see if we made any progress. If we haven't, * we likely never will. Bail out. 
* * We intentionally report the error twice to indicate * that we tried to re-sync. Maybe it even changed. */ errcode = pt_blk_get_offset(ptdec, &new_sync); if (errcode < 0 || (new_sync <= sync)) break; sync = new_sync; continue; } for (;;) { status = drain_events_block(decoder, &time, status, options); if (status < 0) break; if (status & pts_eos) { if (!(status & pts_ip_suppressed) && !options->quiet) printf("[end of trace]\n"); status = -pte_eos; break; } if (options->print_offset || options->check) { int errcode; errcode = pt_blk_get_offset(ptdec, &offset); if (errcode < 0) break; } status = pt_blk_next(ptdec, &block, sizeof(block)); if (status < 0) { /* Even in case of errors, we may have succeeded * in decoding some instructions. */ if (block.ninsn) { if (stats) { stats->insn += block.ninsn; stats->blocks += 1; } if (!options->quiet) print_block(decoder, &block, options, stats, offset, time); if (options->check) check_block(&block, iscache, offset); } break; } if (stats) { stats->insn += block.ninsn; stats->blocks += 1; } if (!options->quiet) print_block(decoder, &block, options, stats, offset, time); if (options->check) check_block(&block, iscache, offset); } /* We shouldn't break out of the loop without an error. */ if (!status) status = -pte_internal; /* We're done when we reach the end of the trace stream. 
*/ if (status == -pte_eos) break; diagnose_block(decoder, "error", status, &block); } } static void decode(struct ptxed_decoder *decoder, const struct ptxed_options *options, struct ptxed_stats *stats) { if (!decoder) { printf("[internal error]\n"); return; } switch (decoder->type) { case pdt_insn_decoder: decode_insn(decoder, options, stats); break; case pdt_block_decoder: decode_block(decoder, options, stats); break; } } static int alloc_decoder(struct ptxed_decoder *decoder, const struct pt_config *conf, struct pt_image *image, const struct ptxed_options *options, const char *prog) { struct pt_config config; int errcode; if (!decoder || !conf || !options || !prog) return -pte_internal; config = *conf; switch (decoder->type) { case pdt_insn_decoder: config.flags = decoder->insn.flags; decoder->variant.insn = pt_insn_alloc_decoder(&config); if (!decoder->variant.insn) { fprintf(stderr, "%s: failed to create decoder.\n", prog); return -pte_nomem; } errcode = pt_insn_set_image(decoder->variant.insn, image); if (errcode < 0) { fprintf(stderr, "%s: failed to set image.\n", prog); return errcode; } break; case pdt_block_decoder: config.flags = decoder->block.flags; decoder->variant.block = pt_blk_alloc_decoder(&config); if (!decoder->variant.block) { fprintf(stderr, "%s: failed to create decoder.\n", prog); return -pte_nomem; } errcode = pt_blk_set_image(decoder->variant.block, image); if (errcode < 0) { fprintf(stderr, "%s: failed to set image.\n", prog); return errcode; } break; } return 0; } static void print_stats(struct ptxed_stats *stats) { if (!stats) { printf("[internal error]\n"); return; } if (stats->flags & ptxed_stat_insn) printf("insn: %" PRIu64 ".\n", stats->insn); if (stats->flags & ptxed_stat_blocks) printf("blocks:\t%" PRIu64 ".\n", stats->blocks); } #if defined(FEATURE_SIDEBAND) static int ptxed_print_error(int errcode, const char *filename, uint64_t offset, void *priv) { const struct ptxed_options *options; const char *errstr, *severity; options = 
(struct ptxed_options *) priv; if (!options) return -pte_internal; if (errcode >= 0 && !options->print_sb_warnings) return 0; if (!filename) filename = "<unknown>"; severity = errcode < 0 ? "error" : "warning"; errstr = errcode < 0 ? pt_errstr(pt_errcode(errcode)) : pt_sb_errstr((enum pt_sb_error_code) errcode); if (!errstr) errstr = "<unknown error>"; printf("[%s:%016" PRIx64 " sideband %s: %s]\n", filename, offset, severity, errstr); return 0; } static int ptxed_print_switch(const struct pt_sb_context *context, void *priv) { struct pt_image *image; const char *name; if (!priv) return -pte_internal; image = pt_sb_ctx_image(context); if (!image) return -pte_internal; name = pt_image_name(image); if (!name) name = "<unknown>"; printf("[context: %s]\n", name); return 0; } #if defined(FEATURE_PEVENT) static int ptxed_sb_pevent(struct ptxed_decoder *decoder, char *filename, const char *prog) { struct pt_sb_pevent_config config; uint64_t foffset, fsize, fend; int errcode; if (!decoder || !prog) { fprintf(stderr, "%s: internal error.\n", prog ? 
prog : "?"); return -1; } errcode = preprocess_filename(filename, &foffset, &fsize); if (errcode < 0) { fprintf(stderr, "%s: bad file %s: %s.\n", prog, filename, pt_errstr(pt_errcode(errcode))); return -1; } if (SIZE_MAX < foffset) { fprintf(stderr, "%s: bad offset: 0x%" PRIx64 ".\n", prog, foffset); return -1; } config = decoder->pevent; config.filename = filename; config.begin = (size_t) foffset; config.end = 0; if (fsize) { fend = foffset + fsize; if ((fend <= foffset) || (SIZE_MAX < fend)) { fprintf(stderr, "%s: bad range: 0x%" PRIx64 "-0x%" PRIx64 ".\n", prog, foffset, fend); return -1; } config.end = (size_t) fend; } errcode = pt_sb_alloc_pevent_decoder(decoder->session, &config); if (errcode < 0) { fprintf(stderr, "%s: error loading %s: %s.\n", prog, filename, pt_errstr(pt_errcode(errcode))); return -1; } return 0; } #endif /* defined(FEATURE_PEVENT) */ #endif /* defined(FEATURE_SIDEBAND) */ static int get_arg_uint64(uint64_t *value, const char *option, const char *arg, const char *prog) { char *rest; if (!value || !option || !prog) { fprintf(stderr, "%s: internal error.\n", prog ? 
prog : "?"); return 0; } if (!arg || arg[0] == 0 || (arg[0] == '-' && arg[1] == '-')) { fprintf(stderr, "%s: %s: missing argument.\n", prog, option); return 0; } errno = 0; *value = strtoull(arg, &rest, 0); if (errno || *rest) { fprintf(stderr, "%s: %s: bad argument: %s.\n", prog, option, arg); return 0; } return 1; } static int get_arg_uint32(uint32_t *value, const char *option, const char *arg, const char *prog) { uint64_t val; if (!get_arg_uint64(&val, option, arg, prog)) return 0; if (val > UINT32_MAX) { fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, arg); return 0; } *value = (uint32_t) val; return 1; } #if defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT) static int get_arg_uint16(uint16_t *value, const char *option, const char *arg, const char *prog) { uint64_t val; if (!get_arg_uint64(&val, option, arg, prog)) return 0; if (val > UINT16_MAX) { fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, arg); return 0; } *value = (uint16_t) val; return 1; } #endif /* defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT) */ static int get_arg_uint8(uint8_t *value, const char *option, const char *arg, const char *prog) { uint64_t val; if (!get_arg_uint64(&val, option, arg, prog)) return 0; if (val > UINT8_MAX) { fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, arg); return 0; } *value = (uint8_t) val; return 1; } static int ptxed_addr_cfg(struct pt_config *config, uint8_t filter, const char *option, const char *arg, const char *prog) { uint64_t addr_cfg; if (!config || !option || !arg || !prog) { fprintf(stderr, "%s: internal error.\n", prog ? prog : "?"); return 0; } if (!get_arg_uint64(&addr_cfg, option, arg, prog)) return 0; if (15 < addr_cfg) { fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option, arg); return 0; } /* Make sure the shift doesn't overflow. 
*/ if (15 < filter) { fprintf(stderr, "%s: internal error.\n", prog); return 0; } addr_cfg <<= (filter * 4); config->addr_filter.config.addr_cfg |= addr_cfg; return 1; } extern int main(int argc, char *argv[]) { struct ptxed_decoder decoder; struct ptxed_options options; struct ptxed_stats stats; struct pt_config config; struct pt_image *image; const char *prog; int errcode, i; if (!argc) { help(""); return 1; } prog = argv[0]; image = NULL; memset(&options, 0, sizeof(options)); memset(&stats, 0, sizeof(stats)); pt_config_init(&config); errcode = ptxed_init_decoder(&decoder); if (errcode < 0) { fprintf(stderr, "%s: error initializing decoder: %s.\n", prog, pt_errstr(pt_errcode(errcode))); goto err; } #if defined(FEATURE_SIDEBAND) pt_sb_notify_error(decoder.session, ptxed_print_error, &options); #endif image = pt_image_alloc(NULL); if (!image) { fprintf(stderr, "%s: failed to allocate image.\n", prog); goto err; } for (i = 1; i < argc;) { char *arg; arg = argv[i++]; if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) { help(prog); goto out; } if (strcmp(arg, "--version") == 0) { pt_print_tool_version(prog); goto out; } if (strcmp(arg, "--pt") == 0) { if (argc <= i) { fprintf(stderr, "%s: --pt: missing argument.\n", prog); goto out; } arg = argv[i++]; if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: duplicate pt sources: %s.\n", prog, arg); goto err; } if (config.cpu.vendor) { errcode = pt_cpu_errata(&config.errata, &config.cpu); if (errcode < 0) printf("[0, 0: config error: %s]\n", pt_errstr(pt_errcode(errcode))); } errcode = load_pt(&config, arg, prog); if (errcode < 0) goto err; errcode = alloc_decoder(&decoder, &config, image, &options, prog); if (errcode < 0) goto err; continue; } if (strcmp(arg, "--raw") == 0) { if (argc <= i) { fprintf(stderr, "%s: --raw: missing argument.\n", prog); goto out; } arg = argv[i++]; errcode = load_raw(decoder.iscache, image, arg, prog); if (errcode < 0) { fprintf(stderr, "%s: --raw: failed to load " "'%s'.\n", prog, 
arg); goto err; } continue; } #if defined(FEATURE_ELF) if (strcmp(arg, "--elf") == 0) { uint64_t base; if (argc <= i) { fprintf(stderr, "%s: --elf: missing argument.\n", prog); goto out; } arg = argv[i++]; base = 0ull; errcode = extract_base(arg, &base); if (errcode < 0) goto err; errcode = load_elf(decoder.iscache, image, arg, base, prog, options.track_image); if (errcode < 0) goto err; continue; } #endif /* defined(FEATURE_ELF) */ if (strcmp(arg, "--att") == 0) { options.att_format = 1; continue; } if (strcmp(arg, "--no-inst") == 0) { options.dont_print_insn = 1; continue; } if (strcmp(arg, "--quiet") == 0 || strcmp(arg, "-q") == 0) { options.quiet = 1; continue; } if (strcmp(arg, "--offset") == 0) { options.print_offset = 1; continue; } if (strcmp(arg, "--time") == 0) { options.print_time = 1; continue; } if (strcmp(arg, "--raw-insn") == 0) { options.print_raw_insn = 1; continue; } if (strcmp(arg, "--event:time") == 0) { options.print_event_time = 1; continue; } if (strcmp(arg, "--event:ip") == 0) { options.print_event_ip = 1; continue; } if (strcmp(arg, "--event:tick") == 0) { decoder.block.flags.variant.block. 
enable_tick_events = 1; decoder.insn.flags.variant.insn.enable_tick_events = 1; continue; } if (strcmp(arg, "--filter:addr0_cfg") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!ptxed_addr_cfg(&config, 0, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr0_a") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!get_arg_uint64(&config.addr_filter.addr0_a, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr0_b") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!get_arg_uint64(&config.addr_filter.addr0_b, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr1_cfg") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!ptxed_addr_cfg(&config, 1, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr1_a") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!get_arg_uint64(&config.addr_filter.addr1_a, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr1_b") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!get_arg_uint64(&config.addr_filter.addr1_b, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr2_cfg") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!ptxed_addr_cfg(&config, 2, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr2_a") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if 
(!get_arg_uint64(&config.addr_filter.addr2_a, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr2_b") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!get_arg_uint64(&config.addr_filter.addr2_b, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr3_cfg") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!ptxed_addr_cfg(&config, 3, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr3_a") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!get_arg_uint64(&config.addr_filter.addr3_a, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--filter:addr3_b") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before --pt.\n", prog, arg); goto err; } if (!get_arg_uint64(&config.addr_filter.addr3_b, arg, argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--check") == 0) { options.check = 1; continue; } if (strcmp(arg, "--iscache-limit") == 0) { uint64_t limit; if (!get_arg_uint64(&limit, arg, argv[i++], prog)) goto err; errcode = pt_iscache_set_limit(decoder.iscache, limit); if (errcode < 0) { fprintf(stderr, "%s: error setting iscache " "limit: %s.\n", prog, pt_errstr(pt_errcode(errcode))); goto err; } continue; } if (strcmp(arg, "--stat") == 0) { options.print_stats = 1; continue; } if (strcmp(arg, "--stat:insn") == 0) { options.print_stats = 1; stats.flags |= ptxed_stat_insn; continue; } if (strcmp(arg, "--stat:blocks") == 0) { options.print_stats = 1; stats.flags |= ptxed_stat_blocks; continue; } #if defined(FEATURE_SIDEBAND) if ((strcmp(arg, "--sb:compact") == 0) || (strcmp(arg, "--sb") == 0)) { options.sb_dump_flags &= ~(uint32_t) ptsbp_verbose; options.sb_dump_flags |= (uint32_t) ptsbp_compact; continue; } if 
(strcmp(arg, "--sb:verbose") == 0) { options.sb_dump_flags &= ~(uint32_t) ptsbp_compact; options.sb_dump_flags |= (uint32_t) ptsbp_verbose; continue; } if (strcmp(arg, "--sb:filename") == 0) { options.sb_dump_flags |= (uint32_t) ptsbp_filename; continue; } if (strcmp(arg, "--sb:offset") == 0) { options.sb_dump_flags |= (uint32_t) ptsbp_file_offset; continue; } if (strcmp(arg, "--sb:time") == 0) { options.sb_dump_flags |= (uint32_t) ptsbp_tsc; continue; } if (strcmp(arg, "--sb:switch") == 0) { pt_sb_notify_switch(decoder.session, ptxed_print_switch, &options); continue; } if (strcmp(arg, "--sb:warn") == 0) { options.print_sb_warnings = 1; continue; } #if defined(FEATURE_PEVENT) if (strcmp(arg, "--pevent:primary") == 0) { arg = argv[i++]; if (!arg) { fprintf(stderr, "%s: --pevent:primary: " "missing argument.\n", prog); goto err; } decoder.pevent.primary = 1; errcode = ptxed_sb_pevent(&decoder, arg, prog); if (errcode < 0) goto err; continue; } if (strcmp(arg, "--pevent:secondary") == 0) { arg = argv[i++]; if (!arg) { fprintf(stderr, "%s: --pevent:secondary: " "missing argument.\n", prog); goto err; } decoder.pevent.primary = 0; errcode = ptxed_sb_pevent(&decoder, arg, prog); if (errcode < 0) goto err; continue; } if (strcmp(arg, "--pevent:sample-type") == 0) { if (!get_arg_uint64(&decoder.pevent.sample_type, "--pevent:sample-type", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--pevent:time-zero") == 0) { if (!get_arg_uint64(&decoder.pevent.time_zero, "--pevent:time-zero", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--pevent:time-shift") == 0) { if (!get_arg_uint16(&decoder.pevent.time_shift, "--pevent:time-shift", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--pevent:time-mult") == 0) { if (!get_arg_uint32(&decoder.pevent.time_mult, "--pevent:time-mult", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--pevent:tsc-offset") == 0) { if (!get_arg_uint64(&decoder.pevent.tsc_offset, "--pevent:tsc-offset", argv[i++], prog)) 
goto err; continue; } if (strcmp(arg, "--pevent:kernel-start") == 0) { if (!get_arg_uint64(&decoder.pevent.kernel_start, "--pevent:kernel-start", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--pevent:sysroot") == 0) { arg = argv[i++]; if (!arg) { fprintf(stderr, "%s: --pevent:sysroot: " "missing argument.\n", prog); goto err; } decoder.pevent.sysroot = arg; continue; } #if defined(FEATURE_ELF) if (strcmp(arg, "--pevent:kcore") == 0) { struct pt_image *kernel; uint64_t base; arg = argv[i++]; if (!arg) { fprintf(stderr, "%s: --pevent:kcore: " "missing argument.\n", prog); goto err; } base = 0ull; errcode = extract_base(arg, &base); if (errcode < 0) goto err; kernel = pt_sb_kernel_image(decoder.session); errcode = load_elf(decoder.iscache, kernel, arg, base, prog, options.track_image); if (errcode < 0) goto err; continue; } #endif /* defined(FEATURE_ELF) */ if (strcmp(arg, "--pevent:vdso-x64") == 0) { arg = argv[i++]; if (!arg) { fprintf(stderr, "%s: --pevent:vdso-x64: " "missing argument.\n", prog); goto err; } decoder.pevent.vdso_x64 = arg; continue; } if (strcmp(arg, "--pevent:vdso-x32") == 0) { arg = argv[i++]; if (!arg) { fprintf(stderr, "%s: --pevent:vdso-x32: " "missing argument.\n", prog); goto err; } decoder.pevent.vdso_x32 = arg; continue; } if (strcmp(arg, "--pevent:vdso-ia32") == 0) { arg = argv[i++]; if (!arg) { fprintf(stderr, "%s: --pevent:vdso-ia32: " "missing argument.\n", prog); goto err; } decoder.pevent.vdso_ia32 = arg; continue; } #endif /* defined(FEATURE_PEVENT) */ #endif /* defined(FEATURE_SIDEBAND) */ if (strcmp(arg, "--cpu") == 0) { /* override cpu information before the decoder * is initialized. 
*/ if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify cpu before the pt source file.\n", prog); goto err; } if (argc <= i) { fprintf(stderr, "%s: --cpu: missing argument.\n", prog); goto out; } arg = argv[i++]; if (strcmp(arg, "none") == 0) { memset(&config.cpu, 0, sizeof(config.cpu)); continue; } errcode = pt_cpu_parse(&config.cpu, arg); if (errcode < 0) { fprintf(stderr, "%s: cpu must be specified as f/m[/s]\n", prog); goto err; } continue; } if (strcmp(arg, "--mtc-freq") == 0) { if (!get_arg_uint8(&config.mtc_freq, "--mtc-freq", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--nom-freq") == 0) { if (!get_arg_uint8(&config.nom_freq, "--nom-freq", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--cpuid-0x15.eax") == 0) { if (!get_arg_uint32(&config.cpuid_0x15_eax, "--cpuid-0x15.eax", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--cpuid-0x15.ebx") == 0) { if (!get_arg_uint32(&config.cpuid_0x15_ebx, "--cpuid-0x15.ebx", argv[i++], prog)) goto err; continue; } if (strcmp(arg, "--verbose") == 0 || strcmp(arg, "-v") == 0) { options.track_image = 1; continue; } if (strcmp(arg, "--insn-decoder") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before the pt " "source file.\n", arg, prog); goto err; } decoder.type = pdt_insn_decoder; continue; } if (strcmp(arg, "--insn:keep-tcal-on-ovf") == 0) { decoder.insn.flags.variant.insn.keep_tcal_on_ovf = 1; continue; } if (strcmp(arg, "--block-decoder") == 0) { if (ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: please specify %s before the pt " "source file.\n", arg, prog); goto err; } decoder.type = pdt_block_decoder; continue; } if (strcmp(arg, "--block:show-blocks") == 0) { options.track_blocks = 1; continue; } if (strcmp(arg, "--block:end-on-call") == 0) { decoder.block.flags.variant.block.end_on_call = 1; continue; } if (strcmp(arg, "--block:end-on-jump") == 0) { decoder.block.flags.variant.block.end_on_jump = 1; continue; } if 
(strcmp(arg, "--block:keep-tcal-on-ovf") == 0) { decoder.block.flags.variant.block.keep_tcal_on_ovf = 1; continue; } fprintf(stderr, "%s: unknown option: %s.\n", prog, arg); goto err; } if (!ptxed_have_decoder(&decoder)) { fprintf(stderr, "%s: no pt file.\n", prog); goto err; } xed_tables_init(); /* If we didn't select any statistics, select them all depending on the * decoder type. */ if (options.print_stats && !stats.flags) { stats.flags |= ptxed_stat_insn; if (decoder.type == pdt_block_decoder) stats.flags |= ptxed_stat_blocks; } #if defined(FEATURE_SIDEBAND) errcode = pt_sb_init_decoders(decoder.session); if (errcode < 0) { fprintf(stderr, "%s: error initializing sideband decoders: %s.\n", prog, pt_errstr(pt_errcode(errcode))); goto err; } #endif /* defined(FEATURE_SIDEBAND) */ decode(&decoder, &options, options.print_stats ? &stats : NULL); if (options.print_stats) print_stats(&stats); out: ptxed_free_decoder(&decoder); pt_image_free(image); free(config.begin); return 0; err: ptxed_free_decoder(&decoder); pt_image_free(image); free(config.begin); return 1; }
608396.c
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../../SDL_internal.h" #if SDL_VIDEO_DRIVER_X11 #include "SDL_hints.h" #include "../SDL_sysvideo.h" #include "../SDL_pixels_c.h" #include "../../events/SDL_keyboard_c.h" #include "../../events/SDL_mouse_c.h" #include "SDL_x11video.h" #include "SDL_x11mouse.h" #include "SDL_x11shape.h" #include "SDL_x11xinput2.h" #if SDL_VIDEO_OPENGL_EGL #include "SDL_x11opengles.h" #endif #include "SDL_timer.h" #include "SDL_syswm.h" #define _NET_WM_STATE_REMOVE 0l #define _NET_WM_STATE_ADD 1l static Bool isMapNotify(Display *dpy, XEvent *ev, XPointer win) { return ev->type == MapNotify && ev->xmap.window == *((Window*)win); } static Bool isUnmapNotify(Display *dpy, XEvent *ev, XPointer win) { return ev->type == UnmapNotify && ev->xunmap.window == *((Window*)win); } /* static Bool isConfigureNotify(Display *dpy, XEvent *ev, XPointer win) { return ev->type == ConfigureNotify && ev->xconfigure.window == *((Window*)win); } static Bool X11_XIfEventTimeout(Display *display, XEvent *event_return, Bool (*predicate)(), XPointer arg, int timeoutMS) { Uint32 start = SDL_GetTicks(); while 
(!X11_XCheckIfEvent(display, event_return, predicate, arg)) { if (SDL_TICKS_PASSED(SDL_GetTicks(), start + timeoutMS)) { return False; } } return True; } */ static SDL_bool X11_IsWindowLegacyFullscreen(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; return (data->fswindow != 0); } static SDL_bool X11_IsWindowMapped(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_VideoData *videodata = (SDL_VideoData *) _this->driverdata; XWindowAttributes attr; X11_XGetWindowAttributes(videodata->display, data->xwindow, &attr); if (attr.map_state != IsUnmapped) { return SDL_TRUE; } else { return SDL_FALSE; } } #if 0 static SDL_bool X11_IsActionAllowed(SDL_Window *window, Atom action) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Atom _NET_WM_ALLOWED_ACTIONS = data->videodata->_NET_WM_ALLOWED_ACTIONS; Atom type; Display *display = data->videodata->display; int form; unsigned long remain; unsigned long len, i; Atom *list; SDL_bool ret = SDL_FALSE; if (X11_XGetWindowProperty(display, data->xwindow, _NET_WM_ALLOWED_ACTIONS, 0, 1024, False, XA_ATOM, &type, &form, &len, &remain, (unsigned char **)&list) == Success) { for (i=0; i<len; ++i) { if (list[i] == action) { ret = SDL_TRUE; break; } } X11_XFree(list); } return ret; } #endif /* 0 */ void X11_SetNetWMState(_THIS, Window xwindow, Uint32 flags) { SDL_VideoData *videodata = (SDL_VideoData *) _this->driverdata; Display *display = videodata->display; /* !!! FIXME: just dereference videodata below instead of copying to locals. 
*/ Atom _NET_WM_STATE = videodata->_NET_WM_STATE; /* Atom _NET_WM_STATE_HIDDEN = videodata->_NET_WM_STATE_HIDDEN; */ Atom _NET_WM_STATE_FOCUSED = videodata->_NET_WM_STATE_FOCUSED; Atom _NET_WM_STATE_MAXIMIZED_VERT = videodata->_NET_WM_STATE_MAXIMIZED_VERT; Atom _NET_WM_STATE_MAXIMIZED_HORZ = videodata->_NET_WM_STATE_MAXIMIZED_HORZ; Atom _NET_WM_STATE_FULLSCREEN = videodata->_NET_WM_STATE_FULLSCREEN; Atom _NET_WM_STATE_ABOVE = videodata->_NET_WM_STATE_ABOVE; Atom _NET_WM_STATE_SKIP_TASKBAR = videodata->_NET_WM_STATE_SKIP_TASKBAR; Atom _NET_WM_STATE_SKIP_PAGER = videodata->_NET_WM_STATE_SKIP_PAGER; Atom atoms[16]; int count = 0; /* The window manager sets this property, we shouldn't set it. If we did, this would indicate to the window manager that we don't actually want to be mapped during X11_XMapRaised(), which would be bad. * if (flags & SDL_WINDOW_HIDDEN) { atoms[count++] = _NET_WM_STATE_HIDDEN; } */ if (flags & SDL_WINDOW_ALWAYS_ON_TOP) { atoms[count++] = _NET_WM_STATE_ABOVE; } if (flags & SDL_WINDOW_SKIP_TASKBAR) { atoms[count++] = _NET_WM_STATE_SKIP_TASKBAR; atoms[count++] = _NET_WM_STATE_SKIP_PAGER; } if (flags & SDL_WINDOW_INPUT_FOCUS) { atoms[count++] = _NET_WM_STATE_FOCUSED; } if (flags & SDL_WINDOW_MAXIMIZED) { atoms[count++] = _NET_WM_STATE_MAXIMIZED_VERT; atoms[count++] = _NET_WM_STATE_MAXIMIZED_HORZ; } if (flags & SDL_WINDOW_FULLSCREEN) { atoms[count++] = _NET_WM_STATE_FULLSCREEN; } SDL_assert(count <= SDL_arraysize(atoms)); if (count > 0) { X11_XChangeProperty(display, xwindow, _NET_WM_STATE, XA_ATOM, 32, PropModeReplace, (unsigned char *)atoms, count); } else { X11_XDeleteProperty(display, xwindow, _NET_WM_STATE); } } Uint32 X11_GetNetWMState(_THIS, Window xwindow) { SDL_VideoData *videodata = (SDL_VideoData *) _this->driverdata; Display *display = videodata->display; Atom _NET_WM_STATE = videodata->_NET_WM_STATE; Atom _NET_WM_STATE_HIDDEN = videodata->_NET_WM_STATE_HIDDEN; Atom _NET_WM_STATE_FOCUSED = videodata->_NET_WM_STATE_FOCUSED; Atom 
_NET_WM_STATE_MAXIMIZED_VERT = videodata->_NET_WM_STATE_MAXIMIZED_VERT;
    Atom _NET_WM_STATE_MAXIMIZED_HORZ = videodata->_NET_WM_STATE_MAXIMIZED_HORZ;
    Atom _NET_WM_STATE_FULLSCREEN = videodata->_NET_WM_STATE_FULLSCREEN;
    Atom actualType;
    int actualFormat;
    unsigned long i, numItems, bytesAfter;
    unsigned char *propertyValue = NULL;
    long maxLength = 1024;
    Uint32 flags = 0;

    /* Read up to maxLength atoms from the window's _NET_WM_STATE property and
     * translate the states we understand into SDL window flags. */
    if (X11_XGetWindowProperty(display, xwindow, _NET_WM_STATE,
                               0l, maxLength, False, XA_ATOM, &actualType,
                               &actualFormat, &numItems, &bytesAfter,
                               &propertyValue) == Success) {
        Atom *atoms = (Atom *) propertyValue;
        int maximized = 0;
        int fullscreen = 0;

        for (i = 0; i < numItems; ++i) {
            if (atoms[i] == _NET_WM_STATE_HIDDEN) {
                flags |= SDL_WINDOW_HIDDEN;
            } else if (atoms[i] == _NET_WM_STATE_FOCUSED) {
                flags |= SDL_WINDOW_INPUT_FOCUS;
            } else if (atoms[i] == _NET_WM_STATE_MAXIMIZED_VERT) {
                maximized |= 1; /* vertical half of "maximized" */
            } else if (atoms[i] == _NET_WM_STATE_MAXIMIZED_HORZ) {
                maximized |= 2; /* horizontal half of "maximized" */
            } else if ( atoms[i] == _NET_WM_STATE_FULLSCREEN) {
                fullscreen = 1;
            }
        }

        /* Only report SDL_WINDOW_MAXIMIZED when BOTH directions are maximized. */
        if (maximized == 3) {
            flags |= SDL_WINDOW_MAXIMIZED;
        }

        if (fullscreen == 1) {
            flags |= SDL_WINDOW_FULLSCREEN;
        }

        /* If the window is unmapped, numItems will be zero and _NET_WM_STATE_HIDDEN
         * will not be set. Do an additional check to see if the window is unmapped
         * and mark it as SDL_WINDOW_HIDDEN if it is.
*/ { XWindowAttributes attr; SDL_memset(&attr,0,sizeof(attr)); X11_XGetWindowAttributes(videodata->display, xwindow, &attr); if (attr.map_state == IsUnmapped) { flags |= SDL_WINDOW_HIDDEN; } } X11_XFree(propertyValue); } /* FIXME, check the size hints for resizable */ /* flags |= SDL_WINDOW_RESIZABLE; */ return flags; } static int SetupWindowData(_THIS, SDL_Window * window, Window w, BOOL created) { SDL_VideoData *videodata = (SDL_VideoData *) _this->driverdata; SDL_WindowData *data; int numwindows = videodata->numwindows; int windowlistlength = videodata->windowlistlength; SDL_WindowData **windowlist = videodata->windowlist; /* Allocate the window data */ data = (SDL_WindowData *) SDL_calloc(1, sizeof(*data)); if (!data) { return SDL_OutOfMemory(); } data->window = window; data->xwindow = w; #ifdef X_HAVE_UTF8_STRING if (SDL_X11_HAVE_UTF8 && videodata->im) { data->ic = X11_XCreateIC(videodata->im, XNClientWindow, w, XNFocusWindow, w, XNInputStyle, XIMPreeditNothing | XIMStatusNothing, NULL); } #endif data->created = created; data->videodata = videodata; /* Associate the data with the window */ if (numwindows < windowlistlength) { windowlist[numwindows] = data; videodata->numwindows++; } else { windowlist = (SDL_WindowData **) SDL_realloc(windowlist, (numwindows + 1) * sizeof(*windowlist)); if (!windowlist) { SDL_free(data); return SDL_OutOfMemory(); } windowlist[numwindows] = data; videodata->numwindows++; videodata->windowlistlength++; videodata->windowlist = windowlist; } /* Fill in the SDL window with the window data */ { XWindowAttributes attrib; X11_XGetWindowAttributes(data->videodata->display, w, &attrib); window->x = attrib.x; window->y = attrib.y; window->w = attrib.width; window->h = attrib.height; if (attrib.map_state != IsUnmapped) { window->flags |= SDL_WINDOW_SHOWN; } else { window->flags &= ~SDL_WINDOW_SHOWN; } data->visual = attrib.visual; data->colormap = attrib.colormap; } window->flags |= X11_GetNetWMState(_this, w); { Window FocalWindow; int 
RevertTo=0; X11_XGetInputFocus(data->videodata->display, &FocalWindow, &RevertTo); if (FocalWindow==w) { window->flags |= SDL_WINDOW_INPUT_FOCUS; } if (window->flags & SDL_WINDOW_INPUT_FOCUS) { SDL_SetKeyboardFocus(data->window); } if (window->flags & SDL_WINDOW_MOUSE_GRABBED) { /* Tell x11 to clip mouse */ } } /* All done! */ window->driverdata = data; return 0; } static void SetWindowBordered(Display *display, int screen, Window window, SDL_bool border) { /* * this code used to check for KWM_WIN_DECORATION, but KDE hasn't * supported it for years and years. It now respects _MOTIF_WM_HINTS. * Gnome is similar: just use the Motif atom. */ Atom WM_HINTS = X11_XInternAtom(display, "_MOTIF_WM_HINTS", True); if (WM_HINTS != None) { /* Hints used by Motif compliant window managers */ struct { unsigned long flags; unsigned long functions; unsigned long decorations; long input_mode; unsigned long status; } MWMHints = { (1L << 1), 0, border ? 1 : 0, 0, 0 }; X11_XChangeProperty(display, window, WM_HINTS, WM_HINTS, 32, PropModeReplace, (unsigned char *) &MWMHints, sizeof(MWMHints) / sizeof(long)); } else { /* set the transient hints instead, if necessary */ X11_XSetTransientForHint(display, window, RootWindow(display, screen)); } } int X11_CreateWindow(_THIS, SDL_Window * window) { SDL_VideoData *data = (SDL_VideoData *) _this->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) SDL_GetDisplayForWindow(window)->driverdata; SDL_WindowData *windowdata; Display *display = data->display; int screen = displaydata->screen; Visual *visual; int depth; XSetWindowAttributes xattr; Window w; XSizeHints *sizehints; XWMHints *wmhints; XClassHint *classhints; Atom _NET_WM_BYPASS_COMPOSITOR; Atom _NET_WM_WINDOW_TYPE; Atom wintype; const char *wintype_name = NULL; long compositor = 1; Atom _NET_WM_PID; long fevent = 0; #if SDL_VIDEO_OPENGL_GLX || SDL_VIDEO_OPENGL_EGL const char *forced_visual_id = SDL_GetHint(SDL_HINT_VIDEO_X11_WINDOW_VISUALID); if (forced_visual_id != NULL && 
forced_visual_id[0] != '\0') { XVisualInfo *vi, template; int nvis; SDL_zero(template); template.visualid = SDL_strtol(forced_visual_id, NULL, 0); vi = X11_XGetVisualInfo(display, VisualIDMask, &template, &nvis); if (vi) { visual = vi->visual; depth = vi->depth; X11_XFree(vi); } else { return -1; } } else if ((window->flags & SDL_WINDOW_OPENGL) && !SDL_getenv("SDL_VIDEO_X11_VISUALID")) { XVisualInfo *vinfo = NULL; #if SDL_VIDEO_OPENGL_EGL if (((_this->gl_config.profile_mask == SDL_GL_CONTEXT_PROFILE_ES) || SDL_GetHintBoolean(SDL_HINT_VIDEO_X11_FORCE_EGL, SDL_FALSE)) #if SDL_VIDEO_OPENGL_GLX && ( !_this->gl_data || X11_GL_UseEGL(_this) ) #endif ) { vinfo = X11_GLES_GetVisual(_this, display, screen); } else #endif { #if SDL_VIDEO_OPENGL_GLX vinfo = X11_GL_GetVisual(_this, display, screen); #endif } if (!vinfo) { return -1; } visual = vinfo->visual; depth = vinfo->depth; X11_XFree(vinfo); } else #endif { visual = displaydata->visual; depth = displaydata->depth; } xattr.override_redirect = ((window->flags & SDL_WINDOW_TOOLTIP) || (window->flags & SDL_WINDOW_POPUP_MENU)) ? 
True : False;
    xattr.background_pixmap = None;
    xattr.border_pixel = 0;

    if (visual->class == DirectColor) {
        /* DirectColor visuals decompose the pixel into independent R/G/B
         * channel indices, so we must allocate a writable colormap and fill
         * in a linear ramp for each channel ourselves. */
        XColor *colorcells;
        int i;
        int ncolors;
        int rmax, gmax, bmax;
        int rmask, gmask, bmask;
        int rshift, gshift, bshift;

        xattr.colormap =
            X11_XCreateColormap(display, RootWindow(display, screen),
                                visual, AllocAll);

        /* If we can't create a colormap, then we must die */
        if (!xattr.colormap) {
            return SDL_SetError("Could not create writable colormap");
        }

        /* OK, we got a colormap, now fill it in as best as we can */
        colorcells = SDL_malloc(visual->map_entries * sizeof(XColor));
        if (!colorcells) {
            return SDL_OutOfMemory();
        }
        ncolors = visual->map_entries;
        /* NOTE(review): the ramps below divide by (ncolors - 1); this assumes
         * visual->map_entries > 1 — TODO confirm that can't be 1 for a
         * DirectColor visual. */
        rmax = 0xffff;
        gmax = 0xffff;
        bmax = 0xffff;

        /* Find the bit position of each channel mask by shifting it down
         * until its least significant set bit reaches bit 0. */
        rshift = 0;
        rmask = visual->red_mask;
        while (0 == (rmask & 1)) {
            rshift++;
            rmask >>= 1;
        }

        gshift = 0;
        gmask = visual->green_mask;
        while (0 == (gmask & 1)) {
            gshift++;
            gmask >>= 1;
        }

        bshift = 0;
        bmask = visual->blue_mask;
        while (0 == (bmask & 1)) {
            bshift++;
            bmask >>= 1;
        }

        /* build the color table pixel values */
        for (i = 0; i < ncolors; i++) {
            Uint32 red = (rmax * i) / (ncolors - 1);
            Uint32 green = (gmax * i) / (ncolors - 1);
            Uint32 blue = (bmax * i) / (ncolors - 1);
            Uint32 rbits = (rmask * i) / (ncolors - 1);
            Uint32 gbits = (gmask * i) / (ncolors - 1);
            Uint32 bbits = (bmask * i) / (ncolors - 1);
            Uint32 pix = (rbits << rshift) | (gbits << gshift) | (bbits << bshift);

            colorcells[i].pixel = pix;
            colorcells[i].red = red;
            colorcells[i].green = green;
            colorcells[i].blue = blue;
            colorcells[i].flags = DoRed | DoGreen | DoBlue;
        }

        X11_XStoreColors(display, xattr.colormap, colorcells, ncolors);
        SDL_free(colorcells);
    } else {
        /* Read-only visual: a shared, non-writable colormap is sufficient. */
        xattr.colormap =
            X11_XCreateColormap(display, RootWindow(display, screen),
                                visual, AllocNone);
    }

    w = X11_XCreateWindow(display, RootWindow(display, screen),
                          window->x, window->y, window->w, window->h,
                          0, depth, InputOutput, visual,
                          (CWOverrideRedirect | CWBackPixmap | CWBorderPixel |
                           CWColormap), &xattr);
    if (!w) {
        return SDL_SetError("Couldn't create window");
    }
SetWindowBordered(display, screen, w, (window->flags & SDL_WINDOW_BORDERLESS) == 0); sizehints = X11_XAllocSizeHints(); /* Setup the normal size hints */ sizehints->flags = 0; if (!(window->flags & SDL_WINDOW_RESIZABLE)) { sizehints->min_width = sizehints->max_width = window->w; sizehints->min_height = sizehints->max_height = window->h; sizehints->flags |= (PMaxSize | PMinSize); } sizehints->x = window->x; sizehints->y = window->y; sizehints->flags |= USPosition; /* Setup the input hints so we get keyboard input */ wmhints = X11_XAllocWMHints(); wmhints->input = True; wmhints->window_group = data->window_group; wmhints->flags = InputHint | WindowGroupHint; /* Setup the class hints so we can get an icon (AfterStep) */ classhints = X11_XAllocClassHint(); classhints->res_name = data->classname; classhints->res_class = data->classname; /* Set the size, input and class hints, and define WM_CLIENT_MACHINE and WM_LOCALE_NAME */ X11_XSetWMProperties(display, w, NULL, NULL, NULL, 0, sizehints, wmhints, classhints); X11_XFree(sizehints); X11_XFree(wmhints); X11_XFree(classhints); /* Set the PID related to the window for the given hostname, if possible */ if (data->pid > 0) { long pid = (long) data->pid; _NET_WM_PID = X11_XInternAtom(display, "_NET_WM_PID", False); X11_XChangeProperty(display, w, _NET_WM_PID, XA_CARDINAL, 32, PropModeReplace, (unsigned char *) &pid, 1); } /* Set the window manager state */ X11_SetNetWMState(_this, w, window->flags); compositor = 2; /* don't disable compositing except for "normal" windows */ if (window->flags & SDL_WINDOW_UTILITY) { wintype_name = "_NET_WM_WINDOW_TYPE_UTILITY"; } else if (window->flags & SDL_WINDOW_TOOLTIP) { wintype_name = "_NET_WM_WINDOW_TYPE_TOOLTIP"; } else if (window->flags & SDL_WINDOW_POPUP_MENU) { wintype_name = "_NET_WM_WINDOW_TYPE_POPUP_MENU"; } else { wintype_name = "_NET_WM_WINDOW_TYPE_NORMAL"; compositor = 1; /* disable compositing for "normal" windows */ } /* Let the window manager know what type of window we 
are. */ _NET_WM_WINDOW_TYPE = X11_XInternAtom(display, "_NET_WM_WINDOW_TYPE", False); wintype = X11_XInternAtom(display, wintype_name, False); X11_XChangeProperty(display, w, _NET_WM_WINDOW_TYPE, XA_ATOM, 32, PropModeReplace, (unsigned char *)&wintype, 1); if (SDL_GetHintBoolean(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, SDL_TRUE)) { _NET_WM_BYPASS_COMPOSITOR = X11_XInternAtom(display, "_NET_WM_BYPASS_COMPOSITOR", False); X11_XChangeProperty(display, w, _NET_WM_BYPASS_COMPOSITOR, XA_CARDINAL, 32, PropModeReplace, (unsigned char *)&compositor, 1); } { Atom protocols[3]; int proto_count = 0; protocols[proto_count++] = data->WM_DELETE_WINDOW; /* Allow window to be deleted by the WM */ protocols[proto_count++] = data->WM_TAKE_FOCUS; /* Since we will want to set input focus explicitly */ /* Default to using ping if there is no hint */ if (SDL_GetHintBoolean(SDL_HINT_VIDEO_X11_NET_WM_PING, SDL_TRUE)) { protocols[proto_count++] = data->_NET_WM_PING; /* Respond so WM knows we're alive */ } SDL_assert(proto_count <= sizeof(protocols) / sizeof(protocols[0])); X11_XSetWMProtocols(display, w, protocols, proto_count); } if (SetupWindowData(_this, window, w, SDL_TRUE) < 0) { X11_XDestroyWindow(display, w); return -1; } windowdata = (SDL_WindowData *) window->driverdata; #if SDL_VIDEO_OPENGL_ES || SDL_VIDEO_OPENGL_ES2 || SDL_VIDEO_OPENGL_EGL if ((window->flags & SDL_WINDOW_OPENGL) && ((_this->gl_config.profile_mask == SDL_GL_CONTEXT_PROFILE_ES) || SDL_GetHintBoolean(SDL_HINT_VIDEO_X11_FORCE_EGL, SDL_FALSE)) #if SDL_VIDEO_OPENGL_GLX && ( !_this->gl_data || X11_GL_UseEGL(_this) ) #endif ) { #if SDL_VIDEO_OPENGL_EGL if (!_this->egl_data) { return -1; } /* Create the GLES window surface */ windowdata->egl_surface = SDL_EGL_CreateSurface(_this, (NativeWindowType) w); if (windowdata->egl_surface == EGL_NO_SURFACE) { return SDL_SetError("Could not create GLES window surface"); } #else return SDL_SetError("Could not create GLES window surface (EGL support not configured)"); #endif /* 
SDL_VIDEO_OPENGL_EGL */ } #endif #ifdef X_HAVE_UTF8_STRING if (SDL_X11_HAVE_UTF8 && windowdata->ic) { X11_XGetICValues(windowdata->ic, XNFilterEvents, &fevent, NULL); } #endif X11_Xinput2SelectTouch(_this, window); X11_XSelectInput(display, w, (FocusChangeMask | EnterWindowMask | LeaveWindowMask | ExposureMask | ButtonPressMask | ButtonReleaseMask | PointerMotionMask | KeyPressMask | KeyReleaseMask | PropertyChangeMask | StructureNotifyMask | KeymapStateMask | fevent)); X11_XFlush(display); return 0; } int X11_CreateWindowFrom(_THIS, SDL_Window * window, const void *data) { Window w = (Window) data; window->title = X11_GetWindowTitle(_this, w); if (SetupWindowData(_this, window, w, SDL_FALSE) < 0) { return -1; } return 0; } char * X11_GetWindowTitle(_THIS, Window xwindow) { SDL_VideoData *data = (SDL_VideoData *) _this->driverdata; Display *display = data->display; int status, real_format; Atom real_type; unsigned long items_read, items_left; unsigned char *propdata; char *title = NULL; status = X11_XGetWindowProperty(display, xwindow, data->_NET_WM_NAME, 0L, 8192L, False, data->UTF8_STRING, &real_type, &real_format, &items_read, &items_left, &propdata); if (status == Success && propdata) { title = SDL_strdup(SDL_static_cast(char*, propdata)); X11_XFree(propdata); } else { status = X11_XGetWindowProperty(display, xwindow, XA_WM_NAME, 0L, 8192L, False, XA_STRING, &real_type, &real_format, &items_read, &items_left, &propdata); if (status == Success && propdata) { title = SDL_iconv_string("UTF-8", "", SDL_static_cast(char*, propdata), items_read+1); X11_XFree(propdata); } else { title = SDL_strdup(""); } } return title; } void X11_SetWindowTitle(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; XTextProperty titleprop; Status status; const char *title = window->title ? 
window->title : ""; char *title_locale = NULL; #ifdef X_HAVE_UTF8_STRING Atom _NET_WM_NAME = data->videodata->_NET_WM_NAME; #endif title_locale = SDL_iconv_utf8_locale(title); if (!title_locale) { SDL_OutOfMemory(); return; } status = X11_XStringListToTextProperty(&title_locale, 1, &titleprop); SDL_free(title_locale); if (status) { X11_XSetTextProperty(display, data->xwindow, &titleprop, XA_WM_NAME); X11_XFree(titleprop.value); } #ifdef X_HAVE_UTF8_STRING if (SDL_X11_HAVE_UTF8) { status = X11_Xutf8TextListToTextProperty(display, (char **) &title, 1, XUTF8StringStyle, &titleprop); if (status == Success) { X11_XSetTextProperty(display, data->xwindow, &titleprop, _NET_WM_NAME); X11_XFree(titleprop.value); } } #endif X11_XFlush(display); } void X11_SetWindowIcon(_THIS, SDL_Window * window, SDL_Surface * icon) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; Atom _NET_WM_ICON = data->videodata->_NET_WM_ICON; if (icon) { int propsize; long *propdata; /* Set the _NET_WM_ICON property */ SDL_assert(icon->format->format == SDL_PIXELFORMAT_ARGB8888); propsize = 2 + (icon->w * icon->h); propdata = SDL_malloc(propsize * sizeof(long)); if (propdata) { int x, y; Uint32 *src; long *dst; propdata[0] = icon->w; propdata[1] = icon->h; dst = &propdata[2]; for (y = 0; y < icon->h; ++y) { src = (Uint32*)((Uint8*)icon->pixels + y * icon->pitch); for (x = 0; x < icon->w; ++x) { *dst++ = *src++; } } X11_XChangeProperty(display, data->xwindow, _NET_WM_ICON, XA_CARDINAL, 32, PropModeReplace, (unsigned char *) propdata, propsize); } SDL_free(propdata); } else { X11_XDeleteProperty(display, data->xwindow, _NET_WM_ICON); } X11_XFlush(display); } void X11_SetWindowPosition(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; unsigned int childCount; Window childReturn, root, parent; Window* children; XWindowAttributes attrs; int orig_x, orig_y; Uint32 
timeout; X11_XSync(display, False); X11_XQueryTree(display, data->xwindow, &root, &parent, &children, &childCount); X11_XGetWindowAttributes(display, data->xwindow, &attrs); X11_XTranslateCoordinates(display, parent, DefaultRootWindow(display), attrs.x, attrs.y, &orig_x, &orig_y, &childReturn); /*Attempt to move the window*/ X11_XMoveWindow(display, data->xwindow, window->x - data->border_left, window->y - data->border_top); /* Wait a brief time to see if the window manager decided to let this move happen. If the window changes at all, even to an unexpected value, we break out. */ timeout = SDL_GetTicks() + 100; while (SDL_TRUE) { int x, y; X11_XSync(display, False); X11_XGetWindowAttributes(display, data->xwindow, &attrs); X11_XTranslateCoordinates(display, parent, DefaultRootWindow(display), attrs.x, attrs.y, &x, &y, &childReturn); if ((x != orig_x) || (y != orig_y)) { window->x = x; window->y = y; break; /* window moved, time to go. */ } else if ((x == window->x) && (y == window->y)) { break; /* we're at the place we wanted to be anyhow, drop out. */ } if (SDL_TICKS_PASSED(SDL_GetTicks(), timeout)) { break; } SDL_Delay(10); } } void X11_SetWindowMinimumSize(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; if (window->flags & SDL_WINDOW_RESIZABLE) { XSizeHints *sizehints = X11_XAllocSizeHints(); long userhints; X11_XGetWMNormalHints(display, data->xwindow, sizehints, &userhints); sizehints->min_width = window->min_w; sizehints->min_height = window->min_h; sizehints->flags |= PMinSize; X11_XSetWMNormalHints(display, data->xwindow, sizehints); X11_XFree(sizehints); /* See comment in X11_SetWindowSize. 
*/
        X11_XResizeWindow(display, data->xwindow, window->w, window->h);
        X11_XMoveWindow(display, data->xwindow,
                        window->x - data->border_left,
                        window->y - data->border_top);
        X11_XRaiseWindow(display, data->xwindow);
    }

    X11_XFlush(display);
}

/* Push the window's maximum size into WM_NORMAL_HINTS (resizable windows only).
 * NOTE(review): the result of X11_XAllocSizeHints() is used without a NULL
 * check; confirm allocation failure is acceptable to ignore here. */
void
X11_SetWindowMaximumSize(_THIS, SDL_Window * window)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;
    Display *display = data->videodata->display;

    if (window->flags & SDL_WINDOW_RESIZABLE) {
        XSizeHints *sizehints = X11_XAllocSizeHints();
        long userhints;

        X11_XGetWMNormalHints(display, data->xwindow, sizehints, &userhints);

        sizehints->max_width = window->max_w;
        sizehints->max_height = window->max_h;
        sizehints->flags |= PMaxSize;

        X11_XSetWMNormalHints(display, data->xwindow, sizehints);

        X11_XFree(sizehints);

        /* See comment in X11_SetWindowSize. */
        X11_XResizeWindow(display, data->xwindow, window->w, window->h);
        X11_XMoveWindow(display, data->xwindow,
                        window->x - data->border_left,
                        window->y - data->border_top);
        X11_XRaiseWindow(display, data->xwindow);
    }

    X11_XFlush(display);
}

/* Resize the window, waiting briefly for the window manager to apply it. */
void
X11_SetWindowSize(_THIS, SDL_Window * window)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;
    Display *display = data->videodata->display;
    XWindowAttributes attrs;
    int orig_w, orig_h;
    Uint32 timeout;

    X11_XSync(display, False);
    X11_XGetWindowAttributes(display, data->xwindow, &attrs);
    orig_w = attrs.width;
    orig_h = attrs.height;

    if (SDL_IsShapedWindow(window)) {
        X11_ResizeWindowShape(window);
    }

    if (!(window->flags & SDL_WINDOW_RESIZABLE)) {
        /* Apparently, if the X11 Window is set to a 'non-resizable' window, you
           cannot resize it using the X11_XResizeWindow, thus we must set the
           size hints to adjust the window size.
*/ XSizeHints *sizehints = X11_XAllocSizeHints(); long userhints; X11_XGetWMNormalHints(display, data->xwindow, sizehints, &userhints); sizehints->min_width = sizehints->max_width = window->w; sizehints->min_height = sizehints->max_height = window->h; sizehints->flags |= PMinSize | PMaxSize; X11_XSetWMNormalHints(display, data->xwindow, sizehints); X11_XFree(sizehints); /* From Pierre-Loup: WMs each have their little quirks with that. When you change the size hints, they get a ConfigureNotify event with the WM_NORMAL_SIZE_HINTS Atom. They all save the hints then, but they don't all resize the window right away to enforce the new hints. Some of them resize only after: - A user-initiated move or resize - A code-initiated move or resize - Hiding & showing window (Unmap & map) The following move & resize seems to help a lot of WMs that didn't properly update after the hints were changed. We don't do a hide/show, because there are supposedly subtle problems with doing so and transitioning from windowed to fullscreen in Unity. */ X11_XResizeWindow(display, data->xwindow, window->w, window->h); X11_XMoveWindow(display, data->xwindow, window->x - data->border_left, window->y - data->border_top); X11_XRaiseWindow(display, data->xwindow); } else { X11_XResizeWindow(display, data->xwindow, window->w, window->h); } /* Wait a brief time to see if the window manager decided to let this resize happen. If the window changes at all, even to an unexpected value, we break out. */ timeout = SDL_GetTicks() + 100; while (SDL_TRUE) { X11_XSync(display, False); X11_XGetWindowAttributes(display, data->xwindow, &attrs); if ((attrs.width != orig_w) || (attrs.height != orig_h)) { window->w = attrs.width; window->h = attrs.height; break; /* window changed, time to go. */ } else if ((attrs.width == window->w) && (attrs.height == window->h)) { break; /* we're at the place we wanted to be anyhow, drop out. 
*/ } if (SDL_TICKS_PASSED(SDL_GetTicks(), timeout)) { break; } SDL_Delay(10); } } int X11_GetWindowBordersSize(_THIS, SDL_Window * window, int *top, int *left, int *bottom, int *right) { SDL_WindowData *data = (SDL_WindowData *)window->driverdata; *left = data->border_left; *right = data->border_right; *top = data->border_top; *bottom = data->border_bottom; return 0; } int X11_SetWindowOpacity(_THIS, SDL_Window * window, float opacity) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; Atom _NET_WM_WINDOW_OPACITY = data->videodata->_NET_WM_WINDOW_OPACITY; if (opacity == 1.0f) { X11_XDeleteProperty(display, data->xwindow, _NET_WM_WINDOW_OPACITY); } else { const Uint32 FullyOpaque = 0xFFFFFFFF; const long alpha = (long) ((double)opacity * (double)FullyOpaque); X11_XChangeProperty(display, data->xwindow, _NET_WM_WINDOW_OPACITY, XA_CARDINAL, 32, PropModeReplace, (unsigned char *)&alpha, 1); } return 0; } int X11_SetWindowModalFor(_THIS, SDL_Window * modal_window, SDL_Window * parent_window) { SDL_WindowData *data = (SDL_WindowData *) modal_window->driverdata; SDL_WindowData *parent_data = (SDL_WindowData *) parent_window->driverdata; Display *display = data->videodata->display; X11_XSetTransientForHint(display, data->xwindow, parent_data->xwindow); return 0; } int X11_SetWindowInputFocus(_THIS, SDL_Window * window) { if (X11_IsWindowMapped(_this, window)) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; X11_XSetInputFocus(display, data->xwindow, RevertToNone, CurrentTime); X11_XFlush(display); return 0; } return -1; } void X11_SetWindowBordered(_THIS, SDL_Window * window, SDL_bool bordered) { const SDL_bool focused = ((window->flags & SDL_WINDOW_INPUT_FOCUS) != 0); const SDL_bool visible = ((window->flags & SDL_WINDOW_HIDDEN) == 0); SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) 
SDL_GetDisplayForWindow(window)->driverdata; Display *display = data->videodata->display; XEvent event; SetWindowBordered(display, displaydata->screen, data->xwindow, bordered); X11_XFlush(display); if (visible) { XWindowAttributes attr; do { X11_XSync(display, False); X11_XGetWindowAttributes(display, data->xwindow, &attr); } while (attr.map_state != IsViewable); if (focused) { X11_XSetInputFocus(display, data->xwindow, RevertToParent, CurrentTime); } } /* make sure these don't make it to the real event queue if they fired here. */ X11_XSync(display, False); X11_XCheckIfEvent(display, &event, &isUnmapNotify, (XPointer)&data->xwindow); X11_XCheckIfEvent(display, &event, &isMapNotify, (XPointer)&data->xwindow); } void X11_SetWindowResizable(_THIS, SDL_Window * window, SDL_bool resizable) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; XSizeHints *sizehints = X11_XAllocSizeHints(); long userhints; X11_XGetWMNormalHints(display, data->xwindow, sizehints, &userhints); if (resizable) { /* FIXME: Is there a better way to get max window size from X? -flibit */ const int maxsize = 0x7FFFFFFF; sizehints->min_width = window->min_w; sizehints->min_height = window->min_h; sizehints->max_width = (window->max_w == 0) ? maxsize : window->max_w; sizehints->max_height = (window->max_h == 0) ? maxsize : window->max_h; } else { sizehints->min_width = window->w; sizehints->min_height = window->h; sizehints->max_width = window->w; sizehints->max_height = window->h; } sizehints->flags |= PMinSize | PMaxSize; X11_XSetWMNormalHints(display, data->xwindow, sizehints); X11_XFree(sizehints); /* See comment in X11_SetWindowSize. 
*/ X11_XResizeWindow(display, data->xwindow, window->w, window->h); X11_XMoveWindow(display, data->xwindow, window->x - data->border_left, window->y - data->border_top); X11_XRaiseWindow(display, data->xwindow); X11_XFlush(display); } void X11_ShowWindow(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; XEvent event; if (!X11_IsWindowMapped(_this, window)) { X11_XMapRaised(display, data->xwindow); /* Blocking wait for "MapNotify" event. * We use X11_XIfEvent because pXWindowEvent takes a mask rather than a type, * and XCheckTypedWindowEvent doesn't block */ if(!(window->flags & SDL_WINDOW_FOREIGN)) X11_XIfEvent(display, &event, &isMapNotify, (XPointer)&data->xwindow); X11_XFlush(display); } if (!data->videodata->net_wm) { /* no WM means no FocusIn event, which confuses us. Force it. */ X11_XSetInputFocus(display, data->xwindow, RevertToNone, CurrentTime); X11_XFlush(display); } } void X11_HideWindow(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) SDL_GetDisplayForWindow(window)->driverdata; Display *display = data->videodata->display; XEvent event; if (X11_IsWindowMapped(_this, window)) { X11_XWithdrawWindow(display, data->xwindow, displaydata->screen); /* Blocking wait for "UnmapNotify" event */ if(!(window->flags & SDL_WINDOW_FOREIGN)) X11_XIfEvent(display, &event, &isUnmapNotify, (XPointer)&data->xwindow); X11_XFlush(display); } } static void SetWindowActive(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) SDL_GetDisplayForWindow(window)->driverdata; Display *display = data->videodata->display; Atom _NET_ACTIVE_WINDOW = data->videodata->_NET_ACTIVE_WINDOW; if (X11_IsWindowMapped(_this, window)) { XEvent e; /*printf("SDL Window %p: sending _NET_ACTIVE_WINDOW with timestamp %lu\n", window, 
data->user_time);*/ SDL_zero(e); e.xany.type = ClientMessage; e.xclient.message_type = _NET_ACTIVE_WINDOW; e.xclient.format = 32; e.xclient.window = data->xwindow; e.xclient.data.l[0] = 1; /* source indication. 1 = application */ e.xclient.data.l[1] = data->user_time; e.xclient.data.l[2] = 0; X11_XSendEvent(display, RootWindow(display, displaydata->screen), 0, SubstructureNotifyMask | SubstructureRedirectMask, &e); X11_XFlush(display); } } void X11_RaiseWindow(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; Display *display = data->videodata->display; X11_XRaiseWindow(display, data->xwindow); SetWindowActive(_this, window); X11_XFlush(display); } static void SetWindowMaximized(_THIS, SDL_Window * window, SDL_bool maximized) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) SDL_GetDisplayForWindow(window)->driverdata; Display *display = data->videodata->display; Atom _NET_WM_STATE = data->videodata->_NET_WM_STATE; Atom _NET_WM_STATE_MAXIMIZED_VERT = data->videodata->_NET_WM_STATE_MAXIMIZED_VERT; Atom _NET_WM_STATE_MAXIMIZED_HORZ = data->videodata->_NET_WM_STATE_MAXIMIZED_HORZ; if (maximized) { window->flags |= SDL_WINDOW_MAXIMIZED; } else { window->flags &= ~SDL_WINDOW_MAXIMIZED; } if (X11_IsWindowMapped(_this, window)) { XEvent e; SDL_zero(e); e.xany.type = ClientMessage; e.xclient.message_type = _NET_WM_STATE; e.xclient.format = 32; e.xclient.window = data->xwindow; e.xclient.data.l[0] = maximized ? 
_NET_WM_STATE_ADD : _NET_WM_STATE_REMOVE; e.xclient.data.l[1] = _NET_WM_STATE_MAXIMIZED_VERT; e.xclient.data.l[2] = _NET_WM_STATE_MAXIMIZED_HORZ; e.xclient.data.l[3] = 0l; X11_XSendEvent(display, RootWindow(display, displaydata->screen), 0, SubstructureNotifyMask | SubstructureRedirectMask, &e); } else { X11_SetNetWMState(_this, data->xwindow, window->flags); } X11_XFlush(display); } void X11_MaximizeWindow(_THIS, SDL_Window * window) { SetWindowMaximized(_this, window, SDL_TRUE); } void X11_MinimizeWindow(_THIS, SDL_Window * window) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) SDL_GetDisplayForWindow(window)->driverdata; Display *display = data->videodata->display; X11_XIconifyWindow(display, data->xwindow, displaydata->screen); X11_XFlush(display); } void X11_RestoreWindow(_THIS, SDL_Window * window) { SetWindowMaximized(_this, window, SDL_FALSE); X11_ShowWindow(_this, window); SetWindowActive(_this, window); } /* This asks the Window Manager to handle fullscreen for us. This is the modern way. 
*/ static void X11_SetWindowFullscreenViaWM(_THIS, SDL_Window * window, SDL_VideoDisplay * _display, SDL_bool fullscreen) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) _display->driverdata; Display *display = data->videodata->display; Atom _NET_WM_STATE = data->videodata->_NET_WM_STATE; Atom _NET_WM_STATE_FULLSCREEN = data->videodata->_NET_WM_STATE_FULLSCREEN; if (X11_IsWindowMapped(_this, window)) { XEvent e; if (!(window->flags & SDL_WINDOW_RESIZABLE)) { /* Compiz refuses fullscreen toggle if we're not resizable, so update the hints so we can be resized to the fullscreen resolution (or reset so we're not resizable again) */ XSizeHints *sizehints = X11_XAllocSizeHints(); long flags = 0; X11_XGetWMNormalHints(display, data->xwindow, sizehints, &flags); /* set the resize flags on */ if (fullscreen) { /* we are going fullscreen so turn the flags off */ sizehints->flags &= ~(PMinSize | PMaxSize); } else { /* Reset the min/max width height to make the window non-resizable again */ sizehints->flags |= PMinSize | PMaxSize; sizehints->min_width = sizehints->max_width = window->windowed.w; sizehints->min_height = sizehints->max_height = window->windowed.h; } X11_XSetWMNormalHints(display, data->xwindow, sizehints); X11_XFree(sizehints); } SDL_zero(e); e.xany.type = ClientMessage; e.xclient.message_type = _NET_WM_STATE; e.xclient.format = 32; e.xclient.window = data->xwindow; e.xclient.data.l[0] = fullscreen ? _NET_WM_STATE_ADD : _NET_WM_STATE_REMOVE; e.xclient.data.l[1] = _NET_WM_STATE_FULLSCREEN; e.xclient.data.l[3] = 0l; X11_XSendEvent(display, RootWindow(display, displaydata->screen), 0, SubstructureNotifyMask | SubstructureRedirectMask, &e); /* Fullscreen windows sometimes end up being marked maximized by window managers. Force it back to how we expect it to be. 
*/ if (!fullscreen && ((window->flags & SDL_WINDOW_MAXIMIZED) == 0)) { SDL_zero(e); e.xany.type = ClientMessage; e.xclient.message_type = _NET_WM_STATE; e.xclient.format = 32; e.xclient.window = data->xwindow; e.xclient.data.l[0] = _NET_WM_STATE_REMOVE; e.xclient.data.l[1] = data->videodata->_NET_WM_STATE_MAXIMIZED_VERT; e.xclient.data.l[2] = data->videodata->_NET_WM_STATE_MAXIMIZED_HORZ; e.xclient.data.l[3] = 0l; X11_XSendEvent(display, RootWindow(display, displaydata->screen), 0, SubstructureNotifyMask | SubstructureRedirectMask, &e); } } else { Uint32 flags; flags = window->flags; if (fullscreen) { flags |= SDL_WINDOW_FULLSCREEN; } else { flags &= ~SDL_WINDOW_FULLSCREEN; } X11_SetNetWMState(_this, data->xwindow, flags); } if (data->visual->class == DirectColor) { if ( fullscreen ) { X11_XInstallColormap(display, data->colormap); } else { X11_XUninstallColormap(display, data->colormap); } } X11_XFlush(display); } /* This handles fullscreen itself, outside the Window Manager. */ static void X11_BeginWindowFullscreenLegacy(_THIS, SDL_Window * window, SDL_VideoDisplay * _display) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) _display->driverdata; Visual *visual = data->visual; Display *display = data->videodata->display; const int screen = displaydata->screen; Window root = RootWindow(display, screen); const int def_vis = (visual == DefaultVisual(display, screen)); unsigned long xattrmask = 0; XSetWindowAttributes xattr; XEvent ev; SDL_Rect rect; if ( data->fswindow ) { return; /* already fullscreen, I hope. */ } X11_GetDisplayBounds(_this, _display, &rect); SDL_zero(xattr); xattr.override_redirect = True; xattrmask |= CWOverrideRedirect; xattr.background_pixel = def_vis ? 
BlackPixel(display, screen) : 0; xattrmask |= CWBackPixel; xattr.border_pixel = 0; xattrmask |= CWBorderPixel; xattr.colormap = data->colormap; xattrmask |= CWColormap; data->fswindow = X11_XCreateWindow(display, root, rect.x, rect.y, rect.w, rect.h, 0, displaydata->depth, InputOutput, visual, xattrmask, &xattr); X11_XSelectInput(display, data->fswindow, StructureNotifyMask); X11_XSetWindowBackground(display, data->fswindow, 0); X11_XInstallColormap(display, data->colormap); X11_XClearWindow(display, data->fswindow); X11_XMapRaised(display, data->fswindow); /* Make sure the fswindow is in view by warping mouse to the corner */ X11_XUngrabPointer(display, CurrentTime); X11_XWarpPointer(display, None, root, 0, 0, 0, 0, rect.x, rect.y); /* Wait to be mapped, filter Unmap event out if it arrives. */ X11_XIfEvent(display, &ev, &isMapNotify, (XPointer)&data->fswindow); X11_XCheckIfEvent(display, &ev, &isUnmapNotify, (XPointer)&data->fswindow); #if SDL_VIDEO_DRIVER_X11_XVIDMODE if ( displaydata->use_vidmode ) { X11_XF86VidModeLockModeSwitch(display, screen, True); } #endif SetWindowBordered(display, displaydata->screen, data->xwindow, SDL_FALSE); /* Center actual window within our cover-the-screen window. */ X11_XReparentWindow(display, data->xwindow, data->fswindow, (rect.w - window->w) / 2, (rect.h - window->h) / 2); /* Move the mouse to the upper left to make sure it's on-screen */ X11_XWarpPointer(display, None, root, 0, 0, 0, 0, rect.x, rect.y); /* Center mouse in the fullscreen window. */ rect.x += (rect.w / 2); rect.y += (rect.h / 2); X11_XWarpPointer(display, None, root, 0, 0, 0, 0, rect.x, rect.y); /* Wait to be mapped, filter Unmap event out if it arrives. 
*/ X11_XIfEvent(display, &ev, &isMapNotify, (XPointer)&data->xwindow); X11_XCheckIfEvent(display, &ev, &isUnmapNotify, (XPointer)&data->xwindow); SDL_UpdateWindowGrab(window); } static void X11_EndWindowFullscreenLegacy(_THIS, SDL_Window * window, SDL_VideoDisplay * _display) { SDL_WindowData *data = (SDL_WindowData *) window->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) _display->driverdata; Display *display = data->videodata->display; const int screen = displaydata->screen; Window root = RootWindow(display, screen); Window fswindow = data->fswindow; XEvent ev; if (!data->fswindow) { return; /* already not fullscreen, I hope. */ } data->fswindow = None; #if SDL_VIDEO_DRIVER_X11_VIDMODE if ( displaydata->use_vidmode ) { X11_XF86VidModeLockModeSwitch(display, screen, False); } #endif SDL_UpdateWindowGrab(window); X11_XReparentWindow(display, data->xwindow, root, window->x, window->y); /* flush these events so they don't confuse normal event handling */ X11_XSync(display, False); X11_XCheckIfEvent(display, &ev, &isMapNotify, (XPointer)&data->xwindow); X11_XCheckIfEvent(display, &ev, &isUnmapNotify, (XPointer)&data->xwindow); SetWindowBordered(display, screen, data->xwindow, (window->flags & SDL_WINDOW_BORDERLESS) == 0); X11_XWithdrawWindow(display, fswindow, screen); /* Wait to be unmapped. */ X11_XIfEvent(display, &ev, &isUnmapNotify, (XPointer)&fswindow); X11_XDestroyWindow(display, fswindow); } void X11_SetWindowFullscreen(_THIS, SDL_Window * window, SDL_VideoDisplay * _display, SDL_bool fullscreen) { /* !!! FIXME: SDL_Hint? */ SDL_bool legacy = SDL_FALSE; const char *env = SDL_getenv("SDL_VIDEO_X11_LEGACY_FULLSCREEN"); if (env) { legacy = SDL_atoi(env); } else { SDL_VideoData *videodata = (SDL_VideoData *) _this->driverdata; SDL_DisplayData *displaydata = (SDL_DisplayData *) _display->driverdata; if ( displaydata->use_vidmode ) { legacy = SDL_TRUE; /* the new stuff only works with XRandR. 
*/
        } else if ( !videodata->net_wm ) {
            legacy = SDL_TRUE;  /* The window manager doesn't support it */
        } else {
            /* !!! FIXME: look at the window manager name, and blacklist certain ones? */
            /* http://stackoverflow.com/questions/758648/find-the-name-of-the-x-window-manager */
            legacy = SDL_FALSE;  /* try the new way. */
        }
    }

    if (legacy) {
        if (fullscreen) {
            X11_BeginWindowFullscreenLegacy(_this, window, _display);
        } else {
            X11_EndWindowFullscreenLegacy(_this, window, _display);
        }
    } else {
        X11_SetWindowFullscreenViaWM(_this, window, _display, fullscreen);
    }
}

/* Upload a 3x256 gamma ramp (R, G, B planes) into the window's DirectColor
   colormap.  Fails for any other visual class.  ramp is assumed to hold
   768 Uint16 entries — presumably the SDL gamma-ramp layout; confirm at
   caller. */
int
X11_SetWindowGammaRamp(_THIS, SDL_Window * window, const Uint16 * ramp)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;
    Display *display = data->videodata->display;
    Visual *visual = data->visual;
    Colormap colormap = data->colormap;
    XColor *colorcells;
    int ncolors;
    int rmask, gmask, bmask;
    int rshift, gshift, bshift;
    int i;

    if (visual->class != DirectColor) {
        return SDL_SetError("Window doesn't have DirectColor visual");
    }

    ncolors = visual->map_entries;
    colorcells = SDL_malloc(ncolors * sizeof(XColor));
    if (!colorcells) {
        return SDL_OutOfMemory();
    }

    /* Derive the shift (bit position) of each channel from the visual's
       channel masks; the loops strip trailing zero bits. */
    rshift = 0;
    rmask = visual->red_mask;
    while (0 == (rmask & 1)) {
        rshift++;
        rmask >>= 1;
    }

    gshift = 0;
    gmask = visual->green_mask;
    while (0 == (gmask & 1)) {
        gshift++;
        gmask >>= 1;
    }

    bshift = 0;
    bmask = visual->blue_mask;
    while (0 == (bmask & 1)) {
        bshift++;
        bmask >>= 1;
    }

    /* build the color table pixel values */
    for (i = 0; i < ncolors; i++) {
        Uint32 rbits = (rmask * i) / (ncolors - 1);
        Uint32 gbits = (gmask * i) / (ncolors - 1);
        Uint32 bbits = (bmask * i) / (ncolors - 1);
        Uint32 pix = (rbits << rshift) | (gbits << gshift) | (bbits << bshift);

        colorcells[i].pixel = pix;

        colorcells[i].red = ramp[(0 * 256) + i];
        colorcells[i].green = ramp[(1 * 256) + i];
        colorcells[i].blue = ramp[(2 * 256) + i];

        colorcells[i].flags = DoRed | DoGreen | DoBlue;
    }

    X11_XStoreColors(display, colormap, colorcells, ncolors);
    X11_XFlush(display);
    SDL_free(colorcells);

    return 0;
}

/* Grab or release the pointer.  A grab is also forced for legacy-fullscreen
   windows (to confine the cursor when XVidMode panning is possible).
   Retries for up to ~5s; a persistent failure flips broken_pointer_grab so
   we never retry for this video session. */
void
X11_SetWindowMouseGrab(_THIS, SDL_Window * window, SDL_bool grabbed)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;
    Display *display = data->videodata->display;
    SDL_bool oldstyle_fullscreen;

    /* ICCCM2.0-compliant window managers can handle fullscreen windows
       If we're using XVidMode to change resolution we need to confine
       the cursor so we don't pan around the virtual desktop.
     */
    oldstyle_fullscreen = X11_IsWindowLegacyFullscreen(_this, window);

    if (oldstyle_fullscreen || grabbed) {
        /* If the window is unmapped, XGrab calls return GrabNotViewable,
           so when we get a MapNotify later, we'll try to update the grab as
           appropriate. */
        if (window->flags & SDL_WINDOW_HIDDEN) {
            return;
        }

        /* Try to grab the mouse */
        if (!data->videodata->broken_pointer_grab) {
            const unsigned int mask = ButtonPressMask | ButtonReleaseMask | PointerMotionMask | FocusChangeMask;
            int attempts;
            int result;

            /* Try for up to 5000ms (5s) to grab. If it still fails, stop trying. */
            for (attempts = 0; attempts < 100; attempts++) {
                result = X11_XGrabPointer(display, data->xwindow, True, mask, GrabModeAsync,
                                 GrabModeAsync, data->xwindow, None, CurrentTime);
                if (result == GrabSuccess) {
                    break;
                }
                SDL_Delay(50);
            }

            if (result != GrabSuccess) {
                SDL_LogWarn(SDL_LOG_CATEGORY_VIDEO, "The X server refused to let us grab the mouse. You might experience input bugs.");
                data->videodata->broken_pointer_grab = SDL_TRUE;  /* don't try again. */
            }
        }

        /* Raise the window if we grab the mouse */
        X11_XRaiseWindow(display, data->xwindow);

        /* Now grab the keyboard on old-style fullscreen */
        if (oldstyle_fullscreen) {
            X11_SetWindowKeyboardGrab(_this, window, SDL_TRUE);
        }
    } else {
        X11_XUngrabPointer(display, CurrentTime);
    }

    X11_XSync(display, False);
}

/* Grab or release the keyboard.  Skipped while the window is hidden, since
   grabs on unmapped windows return GrabNotViewable. */
void
X11_SetWindowKeyboardGrab(_THIS, SDL_Window * window, SDL_bool grabbed)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;
    Display *display = data->videodata->display;

    if (grabbed) {
        /* If the window is unmapped, XGrab calls return GrabNotViewable,
           so when we get a MapNotify later, we'll try to update the grab as
           appropriate. */
        if (window->flags & SDL_WINDOW_HIDDEN) {
            return;
        }

        X11_XGrabKeyboard(display, data->xwindow, True, GrabModeAsync,
                          GrabModeAsync, CurrentTime);
    } else {
        X11_XUngrabKeyboard(display, CurrentTime);
    }
    X11_XSync(display, False);
}

/* Tear down per-window driver data: remove the window from the video data's
   window list, destroy the input context and (if we created it) the X
   window itself, then free the driver data. */
void
X11_DestroyWindow(_THIS, SDL_Window * window)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;

    if (data) {
        SDL_VideoData *videodata = (SDL_VideoData *) data->videodata;
        Display *display = videodata->display;
        int numwindows = videodata->numwindows;
        SDL_WindowData **windowlist = videodata->windowlist;
        int i;

        if (windowlist) {
            /* Swap-remove: replace this entry with the last one. */
            for (i = 0; i < numwindows; ++i) {
                if (windowlist[i] && (windowlist[i]->window == window)) {
                    windowlist[i] = windowlist[numwindows - 1];
                    windowlist[numwindows - 1] = NULL;
                    videodata->numwindows--;
                    break;
                }
            }
        }
#ifdef X_HAVE_UTF8_STRING
        if (data->ic) {
            X11_XDestroyIC(data->ic);
        }
#endif
        if (data->created) {
            /* Only destroy X windows we created, not foreign ones. */
            X11_XDestroyWindow(display, data->xwindow);
            X11_XFlush(display);
        }
        SDL_free(data);
    }
    window->driverdata = NULL;
}

/* Fill in SDL_SysWMinfo with the X11 display/window handles.  The caller's
   compiled-against SDL version must match ours exactly. */
SDL_bool
X11_GetWindowWMInfo(_THIS, SDL_Window * window, SDL_SysWMinfo * info)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;
    Display *display;

    if (!data) {
        /* This sometimes happens in SDL_IBus_UpdateTextRect() while creating the window */
        SDL_SetError("Window not initialized");
        return SDL_FALSE;
    }
    display = data->videodata->display;

    if (info->version.major == SDL_MAJOR_VERSION &&
        info->version.minor == SDL_MINOR_VERSION) {
        info->subsystem = SDL_SYSWM_X11;
        info->info.x11.display = display;
        info->info.x11.window = data->xwindow;
        return SDL_TRUE;
    } else {
        SDL_SetError("Application not compiled with SDL %d.%d",
                     SDL_MAJOR_VERSION, SDL_MINOR_VERSION);
        return SDL_FALSE;
    }
}

/* Enable/disable hit-testing for this window; nothing to do here on X11. */
int
X11_SetWindowHitTest(SDL_Window *window, SDL_bool enabled)
{
    return 0;  /* just succeed, the real work is done elsewhere. */
}

/* Advertise (or retract) XDND drag-and-drop support by setting the
   XdndAware property to protocol version 5. */
void
X11_AcceptDragAndDrop(SDL_Window * window, SDL_bool accept)
{
    SDL_WindowData *data = (SDL_WindowData *) window->driverdata;
    Display *display = data->videodata->display;
    Atom XdndAware = X11_XInternAtom(display, "XdndAware", False);

    if (accept) {
        Atom xdnd_version = 5;
        X11_XChangeProperty(display, data->xwindow, XdndAware, XA_ATOM, 32,
                            PropModeReplace, (unsigned char*)&xdnd_version, 1);
    } else {
        X11_XDeleteProperty(display, data->xwindow, XdndAware);
    }
}

#endif /* SDL_VIDEO_DRIVER_X11 */

/* vi: set ts=4 sw=4 expandtab: */