{ "metadata": { "prompt_num_line": 5, "min_lines": 20 }, "data": { "static int __init cops_irq (int ioaddr, int board)\n{ /*\n * This does not use the IRQ to determine where the IRQ is. We just\n * assume that when we get a correct status response that it's the IRQ.\n * This really just verifies the IO port but since we only have access": "static int __init cops_irq (int ioaddr, int board)\n{ /*\n * This does not use the IRQ to determine where the IRQ is. We just\n * assume that when we get a correct status response that it's the IRQ.\n * This really just verifies the IO port but since we only have access\n * to such a small number of IRQs (5, 4, 3) this is not bad.\n * This will probably not work for more than one card.\n */\n int irqaddr=0;\n int i, x, status;\n\n if(board==DAYNA)\n {\n outb(0, ioaddr+DAYNA_RESET);\n inb(ioaddr+DAYNA_RESET);\n mdelay(333);\n }\n if(board==TANGENT)\n {\n inb(ioaddr);\n outb(0, ioaddr);\n outb(0, ioaddr+TANG_RESET);\n }\n\n for(i=0; cops_irqlist[i] !=0; i++)\n {\n irqaddr = cops_irqlist[i];\n for(x = 0xFFFF; x>0; x --) /* wait for response */\n {\n if(board==DAYNA)\n {\n status = (inb(ioaddr+DAYNA_CARD_STATUS)&3);\n if(status == 1)\n return irqaddr;\n }\n if(board==TANGENT)\n {\n if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0)\n return irqaddr;\n }\n }\n }\n return 0; /* no IRQ found */\n}", "static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)\n{\n int n;\n int const maxSymbolValue1 = (int)maxSymbolValue + 1;\n": "static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)\n{\n int n;\n int const maxSymbolValue1 = (int)maxSymbolValue + 1;\n\n /* Compute base and set curr to base.\n * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.\n * Then 2^lowerRank <= count[n]+1 <= 2^rank.\n * We attribute each symbol to lowerRank's base value, because we want to know where\n * each rank begins in the output, 
so for rank R we want to count ranks R+1 and above.\n */\n ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);\n for (n = 0; n < maxSymbolValue1; ++n) {\n U32 lowerRank = BIT_highbit32(count[n] + 1);\n rankPosition[lowerRank].base++;\n }\n assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);\n for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {\n rankPosition[n-1].base += rankPosition[n].base;\n rankPosition[n-1].curr = rankPosition[n-1].base;\n }\n /* Sort */\n for (n = 0; n < maxSymbolValue1; ++n) {\n U32 const c = count[n];\n U32 const r = BIT_highbit32(c+1) + 1;\n U32 pos = rankPosition[r].curr++;\n /* Insert into the correct position in the rank.\n * We have at most 256 symbols, so this insertion should be fine.\n */\n while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {\n huffNode[pos] = huffNode[pos-1];\n pos--;\n }\n huffNode[pos].count = c;\n huffNode[pos].byte = (BYTE)n;\n }\n}", "static void ae7_post_dsp_asi_setup(struct hda_codec *codec)\n{\n\tchipio_8051_write_direct(codec, 0x93, 0x10);\n\n\tchipio_8051_write_pll_pmu(codec, 0x44, 0xc2);": "static void ae7_post_dsp_asi_setup(struct hda_codec *codec)\n{\n\tchipio_8051_write_direct(codec, 0x93, 0x10);\n\n\tchipio_8051_write_pll_pmu(codec, 0x44, 0xc2);\n\n\tca0113_mmio_command_set_type2(codec, 0x48, 0x07, 0x83);\n\tca0113_mmio_command_set(codec, 0x30, 0x2e, 0x3f);\n\n\tchipio_set_control_param(codec, 3, 3);\n\tchipio_set_control_flag(codec, CONTROL_FLAG_ASI_96KHZ, 1);\n\n\tsnd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x724, 0x83);\n\tchipio_set_control_param(codec, CONTROL_PARAM_ASI, 0);\n\tsnd_hda_codec_write(codec, 0x17, 0, 0x794, 0x00);\n\n\tchipio_8051_write_exram(codec, 0xfa92, 0x22);\n\n\tae7_post_dsp_pll_setup(codec);\n\tae7_post_dsp_asi_stream_setup(codec);\n\n\tchipio_8051_write_pll_pmu(codec, 0x43, 0xc7);\n\n\tae7_post_dsp_asi_setup_ports(codec);\n}", "static void show_usage(void)\n{\n\tprintf(\n\"Usage: %s [OPTION]...\\n\\n\"\n\"Collect 
closely occurring latencies from %s\\n\"": "static void show_usage(void)\n{\n\tprintf(\n\"Usage: %s [OPTION]...\\n\\n\"\n\"Collect closely occurring latencies from %s\\n\"\n\"with any of the following tracers: preemptirqsoff, preemptoff, irqsoff, \"\n\"wakeup,\\nwakeup_dl, or wakeup_rt.\\n\\n\"\n\n\"The occurrence of a latency is detected by monitoring the file\\n\"\n\"%s with inotify.\\n\\n\"\n\n\"The following options are supported:\\n\\n\"\n\n\"-l, --list\\t\\tList the latency tracers that are supported by the\\n\"\n\"\\t\\t\\tcurrently running Linux kernel. If you don't see the\\n\"\n\"\\t\\t\\ttracer that you want, you will probably need to\\n\"\n\"\\t\\t\\tchange your kernel config and build a new kernel.\\n\\n\"\n\n\"-t, --tracer TR\\t\\tUse the tracer TR. The default is to use the first\\n\"\n\"\\t\\t\\ttracer that is supported by the kernel in the following\\n\"\n\"\\t\\t\\torder of precedence:\\n\\n\"\n\"\\t\\t\\tpreemptirqsoff\\n\"\n\"\\t\\t\\tpreemptoff\\n\"\n\"\\t\\t\\tirqsoff\\n\"\n\"\\t\\t\\twakeup\\n\"\n\"\\t\\t\\twakeup_rt\\n\"\n\"\\t\\t\\twakeup_dl\\n\"\n\"\\n\"\n\"\\t\\t\\tIf TR is not on the list above, then a warning will be\\n\"\n\"\\t\\t\\tprinted.\\n\\n\"\n\n\"-F, --force\\t\\tProceed even if another ftrace tracer is active. Without\\n\"\n\"\\t\\t\\tthis option, the program will refuse to start tracing if\\n\"\n\"\\t\\t\\tany other tracer than the nop tracer is active.\\n\\n\"\n\n\"-s, --threshold TH\\tConfigure ftrace to use a threshold of TH microseconds\\n\"\n\"\\t\\t\\tfor the tracer. The default is 0, which means that\\n\"\n\"\\t\\t\\ttracing_max_latency will be used. tracing_max_latency is\\n\"\n\"\\t\\t\\tset to 0 when the program is started and contains the\\n\"\n\"\\t\\t\\tmaximum of the latencies that have been encountered.\\n\\n\"\n\n\"-f, --function\\t\\tEnable the function-trace option in trace_options. 
With\\n\"\n\"\\t\\t\\tthis option, ftrace will trace the functions that are\\n\"\n\"\\t\\t\\texecuted during a latency, without it we only get the\\n\"\n\"\\t\\t\\tbeginning, end, and backtrace.\\n\\n\"\n\n\"-g, --graph\\t\\tEnable the display-graph option in trace_option. This\\n\"\n\"\\t\\t\\toption causes ftrace to show the graph of how functions\\n\"\n\"\\t\\t\\tare calling other functions.\\n\\n\"\n\n\"-c, --policy POL\\tRun the program with scheduling policy POL. POL can be\\n\"\n\"\\t\\t\\tother, batch, idle, rr or fifo. The default is rr. When\\n\"\n\"\\t\\t\\tusing rr or fifo, remember that these policies may cause\\n\"\n\"\\t\\t\\tother tasks to experience latencies.\\n\\n\"\n\n\"-p, --priority PRI\\tRun the program with priority PRI. The acceptable range\\n\"\n\"\\t\\t\\tof PRI depends on the scheduling policy.\\n\\n\"\n\n\"-n, --notrace\\t\\tIf latency is detected, do not print out the content of\\n\"\n\"\\t\\t\\tthe trace file to standard output\\n\\n\"\n\n\"-t, --threads NRTHR\\tRun NRTHR threads for printing. Default is %d.\\n\\n\"\n\n\"-r, --random\\t\\tArbitrarily sleep a certain amount of time, default\\n\"\n\"\\t\\t\\t%ld ms, before reading the trace file. The\\n\"\n\"\\t\\t\\tprobabilities for sleep are chosen so that the\\n\"\n\"\\t\\t\\tprobability of obtaining any of a cluster of closely\\n\"\n\"\\t\\t\\toccurring latencies are equal, i.e. we will randomly\\n\"\n\"\\t\\t\\tchoose which one we collect from the trace file.\\n\\n\"\n\"\\t\\t\\tThis option is probably only useful with the irqsoff,\\n\"\n\"\\t\\t\\tpreemptoff, and preemptirqsoff tracers.\\n\\n\"\n\n\"-a, --nrlat NRLAT\\tFor the purpose of arbitrary delay, assume that there\\n\"\n\"\\t\\t\\tare no more than NRLAT clustered latencies. If NRLAT\\n\"\n\"\\t\\t\\tlatencies are detected during a run, this value will\\n\"\n\"\\t\\t\\tautomatically be increased to NRLAT + 1 and then to\\n\"\n\"\\t\\t\\tNRLAT + 2 and so on. The default is %d. This option\\n\"\n\"\\t\\t\\timplies -r. 
We need to know this number in order to\\n\"\n\"\\t\\t\\tbe able to calculate the probabilities of sleeping.\\n\"\n\"\\t\\t\\tSpecifically, the probabilities of not sleeping, i.e. to\\n\"\n\"\\t\\t\\tdo an immediate printout will be:\\n\\n\"\n\"\\t\\t\\t1/NRLAT 1/(NRLAT - 1) ... 1/3 1/2 1\\n\\n\"\n\"\\t\\t\\tThe probability of sleeping will be:\\n\\n\"\n\"\\t\\t\\t1 - P, where P is from the series above\\n\\n\"\n\"\\t\\t\\tThis descending probability will cause us to choose\\n\"\n\"\\t\\t\\tan occurrence at random. Observe that the final\\n\"\n\"\\t\\t\\tprobability is 0, it is when we reach this probability\\n\"\n\"\\t\\t\\tthat we increase NRLAT automatically. As an example,\\n\"\n\"\\t\\t\\twith the default value of 2, the probabilities will be:\\n\\n\"\n\"\\t\\t\\t1/2 0\\n\\n\"\n\"\\t\\t\\tThis means, when a latency is detected we will sleep\\n\"\n\"\\t\\t\\twith 50%% probability. If we ever detect another latency\\n\"\n\"\\t\\t\\tduring the sleep period, then the probability of sleep\\n\"\n\"\\t\\t\\twill be 0%% and the table will be expanded to:\\n\\n\"\n\"\\t\\t\\t1/3 1/2 0\\n\\n\"\n\n\"-v, --verbose\\t\\tIncrease the verbosity. If this option is given once,\\n\"\n\"\\t\\t\\tthen print a message every time that the NRLAT value\\n\"\n\"\\t\\t\\tis automatically increased. It also causes a message to\\n\"\n\"\\t\\t\\tbe printed when the ftrace settings are changed. If this\\n\"\n\"\\t\\t\\toption is given at least twice, then also print a\\n\"\n\"\\t\\t\\twarning for lost events.\\n\\n\"\n\n\"-u, --time TIME\\t\\tArbitrarily sleep for a specified time TIME ms before\\n\"\n\"\\t\\t\\tprinting out the trace from the trace file. The default\\n\"\n\"\\t\\t\\tis %ld ms. This option implies -r.\\n\\n\"\n\n\"-x, --no-ftrace\\t\\tDo not configure ftrace. This assume that the user\\n\"\n\"\\t\\t\\tconfigures the ftrace files in sysfs such as\\n\"\n\"\\t\\t\\t/sys/kernel/tracing/current_tracer or equivalent.\\n\\n\"\n\n\"-i, --tracefile FILE\\tUse FILE as trace file. 
The default is\\n\"\n\"\\t\\t\\t%s.\\n\"\n\"\\t\\t\\tThis options implies -x\\n\\n\"\n\n\"-m, --max-lat FILE\\tUse FILE as tracing_max_latency file. The default is\\n\"\n\"\\t\\t\\t%s.\\n\"\n\"\\t\\t\\tThis options implies -x\\n\\n\"\n,\nprg_name, debug_tracefile_dflt, debug_maxlat_dflt, DEFAULT_NR_PRINTER_THREADS,\nSLEEP_TIME_MS_DEFAULT, DEFAULT_TABLE_SIZE, SLEEP_TIME_MS_DEFAULT,\ndebug_tracefile_dflt, debug_maxlat_dflt);\n}", "static int cake_config_diffserv4(struct Qdisc *sch)\n{\n/* Further pruned list of traffic classes for four-class system:\n *\n *\t Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)": "static int cake_config_diffserv4(struct Qdisc *sch)\n{\n/* Further pruned list of traffic classes for four-class system:\n *\n *\t Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)\n *\t Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2)\n *\t Best Effort (DF, AF1x, TOS2, and those not specified)\n *\t Background Traffic (LE, CS1)\n *\n *\t\tTotal 4 traffic classes.\n */\n\n\tstruct cake_sched_data *q = qdisc_priv(sch);\n\tu32 mtu = psched_mtu(qdisc_dev(sch));\n\tu64 rate = q->rate_bps;\n\tu32 quantum = 1024;\n\n\tq->tin_cnt = 4;\n\n\t/* codepoint to class mapping */\n\tq->tin_index = diffserv4;\n\tq->tin_order = bulk_order;\n\n\t/* class characteristics */\n\tcake_set_rate(&q->tins[0], rate, mtu,\n\t\t us_to_ns(q->target), us_to_ns(q->interval));\n\tcake_set_rate(&q->tins[1], rate >> 4, mtu,\n\t\t us_to_ns(q->target), us_to_ns(q->interval));\n\tcake_set_rate(&q->tins[2], rate >> 1, mtu,\n\t\t us_to_ns(q->target), us_to_ns(q->interval));\n\tcake_set_rate(&q->tins[3], rate >> 2, mtu,\n\t\t us_to_ns(q->target), us_to_ns(q->interval));\n\n\t/* bandwidth-sharing weights */\n\tq->tins[0].tin_quantum = quantum;\n\tq->tins[1].tin_quantum = quantum >> 4;\n\tq->tins[2].tin_quantum = quantum >> 1;\n\tq->tins[3].tin_quantum = quantum >> 2;\n\n\treturn 0;\n}", "static int scan_header(partition_t *part)\n{\n erase_unit_header_t header;\n loff_t offset, max_offset;\n size_t 
ret;": "static int scan_header(partition_t *part)\n{\n erase_unit_header_t header;\n loff_t offset, max_offset;\n size_t ret;\n int err;\n part->header.FormattedSize = 0;\n max_offset = (0x100000mbd.mtd->size)?0x100000:part->mbd.mtd->size;\n /* Search first megabyte for a valid FTL header */\n for (offset = 0;\n\t (offset + sizeof(header)) < max_offset;\n\t offset += part->mbd.mtd->erasesize ? : 0x2000) {\n\n\terr = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,\n (unsigned char *)&header);\n\n\tif (err)\n\t return err;\n\n\tif (strcmp(header.DataOrgTuple+3, \"FTL100\") == 0) break;\n }\n\n if (offset == max_offset) {\n\tprintk(KERN_NOTICE \"ftl_cs: FTL header not found.\\n\");\n\treturn -ENOENT;\n }\n if (header.BlockSize != 9 ||\n\t(header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||\n\t(header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {\n\tprintk(KERN_NOTICE \"ftl_cs: FTL header corrupt!\\n\");\n\treturn -1;\n }\n if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {\n\tprintk(KERN_NOTICE \"ftl: FTL EraseUnitSize %x != MTD erasesize %x\\n\",\n\t 1 << header.EraseUnitSize,part->mbd.mtd->erasesize);\n\treturn -1;\n }\n part->header = header;\n return 0;\n}", "static void zram_accessed(struct zram *zram, u32 index)\n{\n\tzram_clear_flag(zram, index, ZRAM_IDLE);\n};\nstatic void zram_debugfs_register(struct zram *zram) {};": "static void zram_accessed(struct zram *zram, u32 index)\n{\n\tzram_clear_flag(zram, index, ZRAM_IDLE);\n};\nstatic void zram_debugfs_register(struct zram *zram) {};\nstatic void zram_debugfs_unregister(struct zram *zram) {};\n#endif\n\n/*\n * We switched to per-cpu streams and this attr is not needed anymore.\n * However, we will keep it around for some time, because:\n * a) we may revert per-cpu streams in the future\n * b) it's visible to user space and we need to follow our 2 years\n * retirement rule; but we already have a number of 'soon to be\n * altered' attrs, so max_comp_streams need to wait for 
the next\n * layoff cycle.\n */\nstatic ssize_t max_comp_streams_show(struct device *dev,\n\t\tstruct device_attribute *attr, char *buf)\n{\n\treturn scnprintf(buf, PAGE_SIZE, \"%d\\n\", num_online_cpus());\n}", "static int dib8096p_get_best_sampling(struct dvb_frontend *fe, struct dibx090p_best_adc *adc)\n{\n\tu8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1;\n\tu16 xtal = 12000;\n\tu16 fcp_min = 1900; /* PLL, Minimum Frequency of phase comparator (KHz) */": "static int dib8096p_get_best_sampling(struct dvb_frontend *fe, struct dibx090p_best_adc *adc)\n{\n\tu8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1;\n\tu16 xtal = 12000;\n\tu16 fcp_min = 1900; /* PLL, Minimum Frequency of phase comparator (KHz) */\n\tu16 fcp_max = 20000; /* PLL, Maximum Frequency of phase comparator (KHz) */\n\tu32 fmem_max = 140000; /* 140MHz max SDRAM freq */\n\tu32 fdem_min = 66000;\n\tu32 fcp = 0, fs = 0, fdem = 0, fmem = 0;\n\tu32 harmonic_id = 0;\n\n\tadc->timf = 0;\n\tadc->pll_loopdiv = loopdiv;\n\tadc->pll_prediv = prediv;\n\n\tdeb_info(\"bandwidth = %d\", fe->dtv_property_cache.bandwidth_hz);\n\n\t/* Find Min and Max prediv */\n\twhile ((xtal / max_prediv) >= fcp_min)\n\t\tmax_prediv++;\n\n\tmax_prediv--;\n\tmin_prediv = max_prediv;\n\twhile ((xtal / min_prediv) <= fcp_max) {\n\t\tmin_prediv--;\n\t\tif (min_prediv == 1)\n\t\t\tbreak;\n\t}\n\tdeb_info(\"MIN prediv = %d : MAX prediv = %d\", min_prediv, max_prediv);\n\n\tmin_prediv = 1;\n\n\tfor (prediv = min_prediv; prediv < max_prediv; prediv++) {\n\t\tfcp = xtal / prediv;\n\t\tif (fcp > fcp_min && fcp < fcp_max) {\n\t\t\tfor (loopdiv = 1; loopdiv < 64; loopdiv++) {\n\t\t\t\tfmem = ((xtal/prediv) * loopdiv);\n\t\t\t\tfdem = fmem / 2;\n\t\t\t\tfs = fdem / 4;\n\n\t\t\t\t/* test min/max system restrictions */\n\t\t\t\tif ((fdem >= fdem_min) && (fmem <= fmem_max) && (fs >= fe->dtv_property_cache.bandwidth_hz / 1000)) {\n\t\t\t\t\tspur = 0;\n\t\t\t\t\t/* test fs harmonics positions */\n\t\t\t\t\tfor 
(harmonic_id = (fe->dtv_property_cache.frequency / (1000 * fs)); harmonic_id <= ((fe->dtv_property_cache.frequency / (1000 * fs)) + 1); harmonic_id++) {\n\t\t\t\t\t\tif (((fs * harmonic_id) >= (fe->dtv_property_cache.frequency / 1000 - (fe->dtv_property_cache.bandwidth_hz / 2000))) && ((fs * harmonic_id) <= (fe->dtv_property_cache.frequency / 1000 + (fe->dtv_property_cache.bandwidth_hz / 2000)))) {\n\t\t\t\t\t\t\tspur = 1;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (!spur) {\n\t\t\t\t\t\tadc->pll_loopdiv = loopdiv;\n\t\t\t\t\t\tadc->pll_prediv = prediv;\n\t\t\t\t\t\tadc->timf = (4260880253U / fdem) * (1 << 8);\n\t\t\t\t\t\tadc->timf += ((4260880253U % fdem) << 8) / fdem;\n\n\t\t\t\t\t\tdeb_info(\"RF %6d; BW %6d; Xtal %6d; Fmem %6d; Fdem %6d; Fs %6d; Prediv %2d; Loopdiv %2d; Timf %8d;\", fe->dtv_property_cache.frequency, fe->dtv_property_cache.bandwidth_hz, xtal, fmem, fdem, fs, prediv, loopdiv, adc->timf);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif (!spur)\n\t\t\tbreak;\n\t}\n\n\tif (adc->pll_loopdiv == 0 && adc->pll_prediv == 0)\n\t\treturn -EINVAL;\n\treturn 0;\n}", "static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)\n{\n const seqStore_t* seqStore = ZSTD_getSeqStore(zc);\n const seqDef* seqStoreSeqs = seqStore->sequencesStart;\n size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;": "static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)\n{\n const seqStore_t* seqStore = ZSTD_getSeqStore(zc);\n const seqDef* seqStoreSeqs = seqStore->sequencesStart;\n size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;\n size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);\n size_t literalsRead = 0;\n size_t lastLLSize;\n\n ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];\n size_t i;\n repcodes_t updatedRepcodes;\n\n assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);\n /* Ensure we have enough space for last literals \"sequence\" */\n 
assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);\n ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));\n for (i = 0; i < seqStoreSeqSize; ++i) {\n U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;\n outSeqs[i].litLength = seqStoreSeqs[i].litLength;\n outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;\n outSeqs[i].rep = 0;\n\n if (i == seqStore->longLengthPos) {\n if (seqStore->longLengthID == 1) {\n outSeqs[i].litLength += 0x10000;\n } else if (seqStore->longLengthID == 2) {\n outSeqs[i].matchLength += 0x10000;\n }\n }\n\n if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {\n /* Derive the correct offset corresponding to a repcode */\n outSeqs[i].rep = seqStoreSeqs[i].offset;\n if (outSeqs[i].litLength != 0) {\n rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];\n } else {\n if (outSeqs[i].rep == 3) {\n rawOffset = updatedRepcodes.rep[0] - 1;\n } else {\n rawOffset = updatedRepcodes.rep[outSeqs[i].rep];\n }\n }\n }\n outSeqs[i].offset = rawOffset;\n /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode\n so we provide seqStoreSeqs[i].offset - 1 */\n updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,\n seqStoreSeqs[i].offset - 1,\n seqStoreSeqs[i].litLength == 0);\n literalsRead += outSeqs[i].litLength;\n }\n /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.\n * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker\n * for the block boundary, according to the API.\n */\n assert(seqStoreLiteralsSize >= literalsRead);\n lastLLSize = seqStoreLiteralsSize - literalsRead;\n outSeqs[i].litLength = (U32)lastLLSize;\n outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;\n seqStoreSeqSize++;\n zc->seqCollector.seqIndex += seqStoreSeqSize;\n}", "static void ae7_exit_chip(struct hda_codec *codec)\n{\n\tchipio_set_stream_control(codec, 0x18, 0);\n\tchipio_set_stream_source_dest(codec, 0x21, 0xc8, 
0xc8);\n\tchipio_set_stream_channels(codec, 0x21, 0);": "static void ae7_exit_chip(struct hda_codec *codec)\n{\n\tchipio_set_stream_control(codec, 0x18, 0);\n\tchipio_set_stream_source_dest(codec, 0x21, 0xc8, 0xc8);\n\tchipio_set_stream_channels(codec, 0x21, 0);\n\tchipio_set_control_param(codec, CONTROL_PARAM_NODE_ID, 0x09);\n\tchipio_set_control_param(codec, 0x20, 0x01);\n\n\tchipio_set_control_param(codec, CONTROL_PARAM_ASI, 0);\n\n\tchipio_set_stream_control(codec, 0x18, 0);\n\tchipio_set_stream_control(codec, 0x0c, 0);\n\n\tca0113_mmio_command_set(codec, 0x30, 0x2b, 0x00);\n\tsnd_hda_codec_write(codec, 0x15, 0, 0x724, 0x83);\n\tca0113_mmio_command_set_type2(codec, 0x48, 0x07, 0x83);\n\tca0113_mmio_command_set(codec, 0x30, 0x30, 0x00);\n\tca0113_mmio_command_set(codec, 0x30, 0x2e, 0x00);\n\tca0113_mmio_gpio_set(codec, 0, false);\n\tca0113_mmio_gpio_set(codec, 1, false);\n\tca0113_mmio_command_set(codec, 0x30, 0x32, 0x3f);\n\n\tsnd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);\n\tsnd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);\n}", "static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)\n{\n unsigned maxBitsInMask = MIN(params->minMatchLength, 64);\n unsigned hashRateLog = params->hashRateLog;\n": "static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)\n{\n unsigned maxBitsInMask = MIN(params->minMatchLength, 64);\n unsigned hashRateLog = params->hashRateLog;\n\n state->rolling = ~(U32)0;\n\n /* The choice of the splitting criterion is subject to two conditions:\n * 1. it has to trigger on average every 2^(hashRateLog) bytes;\n * 2. 
ideally, it has to depend on a window of minMatchLength bytes.\n *\n * In the gear hash algorithm, bit n depends on the last n bytes;\n * so in order to obtain a good quality splitting criterion it is\n * preferable to use bits with high weight.\n *\n * To match condition 1 we use a mask with hashRateLog bits set\n * and, because of the previous remark, we make sure these bits\n * have the highest possible weight while still respecting\n * condition 2.\n */\n if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {\n state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);\n } else {\n /* In this degenerate case we simply honor the hash rate. */\n state->stopMask = ((U64)1 << hashRateLog) - 1;\n }\n}", "static int build_maps(partition_t *part)\n{\n erase_unit_header_t header;\n uint16_t xvalid, xtrans, i;\n unsigned blocks, j;": "static int build_maps(partition_t *part)\n{\n erase_unit_header_t header;\n uint16_t xvalid, xtrans, i;\n unsigned blocks, j;\n int hdr_ok, ret = -1;\n ssize_t retval;\n loff_t offset;\n\n /* Set up erase unit maps */\n part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) -\n\tpart->header.NumTransferUnits;\n part->EUNInfo = kmalloc_array(part->DataUnits, sizeof(struct eun_info_t),\n GFP_KERNEL);\n if (!part->EUNInfo)\n\t goto out;\n for (i = 0; i < part->DataUnits; i++)\n\tpart->EUNInfo[i].Offset = 0xffffffff;\n part->XferInfo =\n\tkmalloc_array(part->header.NumTransferUnits,\n sizeof(struct xfer_info_t),\n GFP_KERNEL);\n if (!part->XferInfo)\n\t goto out_EUNInfo;\n\n xvalid = xtrans = 0;\n for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {\n\toffset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))\n\t\t << part->header.EraseUnitSize);\n\tret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,\n (unsigned char *)&header);\n\n\tif (ret)\n\t goto out_XferInfo;\n\n\tret = -1;\n\t/* Is this a transfer partition? 
*/\n\thdr_ok = (strcmp(header.DataOrgTuple+3, \"FTL100\") == 0);\n\tif (hdr_ok && (le16_to_cpu(header.LogicalEUN) < part->DataUnits) &&\n\t (part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset == 0xffffffff)) {\n\t part->EUNInfo[le16_to_cpu(header.LogicalEUN)].Offset = offset;\n\t part->EUNInfo[le16_to_cpu(header.LogicalEUN)].EraseCount =\n\t\tle32_to_cpu(header.EraseCount);\n\t xvalid++;\n\t} else {\n\t if (xtrans == part->header.NumTransferUnits) {\n\t\tprintk(KERN_NOTICE \"ftl_cs: format error: too many \"\n\t\t \"transfer units!\\n\");\n\t\tgoto out_XferInfo;\n\t }\n\t if (hdr_ok && (le16_to_cpu(header.LogicalEUN) == 0xffff)) {\n\t\tpart->XferInfo[xtrans].state = XFER_PREPARED;\n\t\tpart->XferInfo[xtrans].EraseCount = le32_to_cpu(header.EraseCount);\n\t } else {\n\t\tpart->XferInfo[xtrans].state = XFER_UNKNOWN;\n\t\t/* Pick anything reasonable for the erase count */\n\t\tpart->XferInfo[xtrans].EraseCount =\n\t\t le32_to_cpu(part->header.EraseCount);\n\t }\n\t part->XferInfo[xtrans].Offset = offset;\n\t xtrans++;\n\t}\n }\n /* Check for format trouble */\n header = part->header;\n if ((xtrans != header.NumTransferUnits) ||\n\t(xvalid+xtrans != le16_to_cpu(header.NumEraseUnits))) {\n\tprintk(KERN_NOTICE \"ftl_cs: format error: erase units \"\n\t \"don't add up!\\n\");\n\tgoto out_XferInfo;\n }\n\n /* Set up virtual page map */\n blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;\n part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t)));\n if (!part->VirtualBlockMap)\n\t goto out_XferInfo;\n\n memset(part->VirtualBlockMap, 0xff, blocks * sizeof(uint32_t));\n part->BlocksPerUnit = (1 << header.EraseUnitSize) >> header.BlockSize;\n\n part->bam_cache = kmalloc_array(part->BlocksPerUnit, sizeof(uint32_t),\n GFP_KERNEL);\n if (!part->bam_cache)\n\t goto out_VirtualBlockMap;\n\n part->bam_index = 0xffff;\n part->FreeTotal = 0;\n\n for (i = 0; i < part->DataUnits; i++) {\n\tpart->EUNInfo[i].Free = 0;\n\tpart->EUNInfo[i].Deleted = 0;\n\toffset 
= part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);\n\n\tret = mtd_read(part->mbd.mtd, offset,\n part->BlocksPerUnit * sizeof(uint32_t), &retval,\n (unsigned char *)part->bam_cache);\n\n\tif (ret)\n\t\tgoto out_bam_cache;\n\n\tfor (j = 0; j < part->BlocksPerUnit; j++) {\n\t if (BLOCK_FREE(le32_to_cpu(part->bam_cache[j]))) {\n\t\tpart->EUNInfo[i].Free++;\n\t\tpart->FreeTotal++;\n\t } else if ((BLOCK_TYPE(le32_to_cpu(part->bam_cache[j])) == BLOCK_DATA) &&\n\t\t (BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j])) < blocks))\n\t\tpart->VirtualBlockMap[BLOCK_NUMBER(le32_to_cpu(part->bam_cache[j]))] =\n\t\t (i << header.EraseUnitSize) + (j << header.BlockSize);\n\t else if (BLOCK_DELETED(le32_to_cpu(part->bam_cache[j])))\n\t\tpart->EUNInfo[i].Deleted++;\n\t}\n }\n\n ret = 0;\n goto out;\n\nout_bam_cache:\n kfree(part->bam_cache);\nout_VirtualBlockMap:\n vfree(part->VirtualBlockMap);\nout_XferInfo:\n kfree(part->XferInfo);\nout_EUNInfo:\n kfree(part->EUNInfo);\nout:\n return ret;\n} /* build_maps */\n\n/*======================================================================\n\n Erase_xfer() schedules an asynchronous erase operation for a\n transfer unit.\n\n======================================================================*/\n\nstatic int erase_xfer(partition_t *part,\n\t\t uint16_t xfernum)\n{\n int ret;\n struct xfer_info_t *xfer;\n struct erase_info *erase;\n\n xfer = &part->XferInfo[xfernum];\n pr_debug(\"ftl_cs: erasing xfer unit at 0x%x\\n\", xfer->Offset);\n xfer->state = XFER_ERASING;\n\n /* Is there a free erase slot? Always in MTD. 
*/\n\n\n erase=kmalloc(sizeof(struct erase_info), GFP_KERNEL);\n if (!erase)\n return -ENOMEM;\n\n erase->addr = xfer->Offset;\n erase->len = 1 << part->header.EraseUnitSize;\n\n ret = mtd_erase(part->mbd.mtd, erase);\n if (!ret) {\n\txfer->state = XFER_ERASED;\n\txfer->EraseCount++;\n } else {\n\txfer->state = XFER_FAILED;\n\tpr_notice(\"ftl_cs: erase failed: err = %d\\n\", ret);\n }\n\n kfree(erase);\n\n return ret;\n} /* erase_xfer */\n\n/*======================================================================\n\n Prepare_xfer() takes a freshly erased transfer unit and gives\n it an appropriate header.\n\n======================================================================*/\n\nstatic int prepare_xfer(partition_t *part, int i)\n{\n erase_unit_header_t header;\n struct xfer_info_t *xfer;\n int nbam, ret;\n uint32_t ctl;\n ssize_t retlen;\n loff_t offset;\n\n xfer = &part->XferInfo[i];\n xfer->state = XFER_FAILED;\n\n pr_debug(\"ftl_cs: preparing xfer unit at 0x%x\\n\", xfer->Offset);\n\n /* Write the transfer unit header */\n header = part->header;\n header.LogicalEUN = cpu_to_le16(0xffff);\n header.EraseCount = cpu_to_le32(xfer->EraseCount);\n\n ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,\n (u_char *)&header);\n\n if (ret) {\n\treturn ret;\n }\n\n /* Write the BAM stub */\n nbam = DIV_ROUND_UP(part->BlocksPerUnit * sizeof(uint32_t) +\n\t\t\tle32_to_cpu(part->header.BAMOffset), SECTOR_SIZE);\n\n offset = xfer->Offset + le32_to_cpu(part->header.BAMOffset);\n ctl = cpu_to_le32(BLOCK_CONTROL);\n\n for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {\n\n\tret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,\n (u_char *)&ctl);\n\n\tif (ret)\n\t return ret;\n }\n xfer->state = XFER_PREPARED;\n return 0;\n\n} /* prepare_xfer */\n\n/*======================================================================\n\n Copy_erase_unit() takes a full erase block and a transfer unit,\n copies everything to the transfer unit, then swaps the 
block\n pointers.\n\n All data blocks are copied to the corresponding blocks in the\n target unit, so the virtual block map does not need to be\n updated.\n\n======================================================================*/\n\nstatic int copy_erase_unit(partition_t *part, uint16_t srcunit,\n\t\t\t uint16_t xferunit)\n{\n u_char buf[SECTOR_SIZE];\n struct eun_info_t *eun;\n struct xfer_info_t *xfer;\n uint32_t src, dest, free, i;\n uint16_t unit;\n int ret;\n ssize_t retlen;\n loff_t offset;\n uint16_t srcunitswap = cpu_to_le16(srcunit);\n\n eun = &part->EUNInfo[srcunit];\n xfer = &part->XferInfo[xferunit];\n pr_debug(\"ftl_cs: copying block 0x%x to 0x%x\\n\",\n\t eun->Offset, xfer->Offset);\n\n\n /* Read current BAM */\n if (part->bam_index != srcunit) {\n\n\toffset = eun->Offset + le32_to_cpu(part->header.BAMOffset);\n\n\tret = mtd_read(part->mbd.mtd, offset,\n part->BlocksPerUnit * sizeof(uint32_t), &retlen,\n (u_char *)(part->bam_cache));\n\n\t/* mark the cache bad, in case we get an error later */\n\tpart->bam_index = 0xffff;\n\n\tif (ret) {\n\t printk( KERN_WARNING \"ftl: Failed to read BAM cache in copy_erase_unit()!\\n\");\n\t return ret;\n\t}\n }\n\n /* Write the LogicalEUN for the transfer unit */\n xfer->state = XFER_UNKNOWN;\n offset = xfer->Offset + 20; /* Bad! 
*/\n unit = cpu_to_le16(0x7fff);\n\n ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,\n (u_char *)&unit);\n\n if (ret) {\n\tprintk( KERN_WARNING \"ftl: Failed to write back to BAM cache in copy_erase_unit()!\\n\");\n\treturn ret;\n }\n\n /* Copy all data blocks from source unit to transfer unit */\n src = eun->Offset; dest = xfer->Offset;\n\n free = 0;\n ret = 0;\n for (i = 0; i < part->BlocksPerUnit; i++) {\n\tswitch (BLOCK_TYPE(le32_to_cpu(part->bam_cache[i]))) {\n\tcase BLOCK_CONTROL:\n\t /* This gets updated later */\n\t break;\n\tcase BLOCK_DATA:\n\tcase BLOCK_REPLACEMENT:\n\t ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,\n (u_char *)buf);\n\t if (ret) {\n\t\tprintk(KERN_WARNING \"ftl: Error reading old xfer unit in copy_erase_unit\\n\");\n\t\treturn ret;\n }\n\n\n\t ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,\n (u_char *)buf);\n\t if (ret) {\n\t\tprintk(KERN_WARNING \"ftl: Error writing new xfer unit in copy_erase_unit\\n\");\n\t\treturn ret;\n }\n\n\t break;\n\tdefault:\n\t /* All other blocks must be free */\n\t part->bam_cache[i] = cpu_to_le32(0xffffffff);\n\t free++;\n\t break;\n\t}\n\tsrc += SECTOR_SIZE;\n\tdest += SECTOR_SIZE;\n }\n\n /* Write the BAM to the transfer unit */\n ret = mtd_write(part->mbd.mtd,\n xfer->Offset + le32_to_cpu(part->header.BAMOffset),\n part->BlocksPerUnit * sizeof(int32_t),\n &retlen,\n (u_char *)part->bam_cache);\n if (ret) {\n\tprintk( KERN_WARNING \"ftl: Error writing BAM in copy_erase_unit\\n\");\n\treturn ret;\n }\n\n\n /* All clear? 
Then update the LogicalEUN again */\n ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),\n &retlen, (u_char *)&srcunitswap);\n\n if (ret) {\n\tprintk(KERN_WARNING \"ftl: Error writing new LogicalEUN in copy_erase_unit\\n\");\n\treturn ret;\n }\n\n\n /* Update the maps and usage stats*/\n swap(xfer->EraseCount, eun->EraseCount);\n swap(xfer->Offset, eun->Offset);\n part->FreeTotal -= eun->Free;\n part->FreeTotal += free;\n eun->Free = free;\n eun->Deleted = 0;\n\n /* Now, the cache should be valid for the new block */\n part->bam_index = srcunit;\n\n return 0;\n} /* copy_erase_unit */\n\n/*======================================================================\n\n reclaim_block() picks a full erase unit and a transfer unit and\n then calls copy_erase_unit() to copy one to the other. Then, it\n schedules an erase on the expired block.\n\n What's a good way to decide which transfer unit and which erase\n unit to use? Beats me. My way is to always pick the transfer\n unit with the fewest erases, and usually pick the data unit with\n the most deleted blocks. But with a small probability, pick the\n oldest data unit instead. 
/*
 * reclaim_block() picks a full erase unit and a transfer unit and then
 * calls copy_erase_unit() to copy one to the other.  Then, it schedules
 * an erase on the expired block.
 *
 * Transfer-unit choice: always the prepared unit with the fewest erases.
 * Data-unit choice: usually the unit with the most deleted blocks, but
 * whenever (jiffies % shuffle_freq) == 0 the least-erased ("freshest")
 * unit is picked instead, shuffling static data around for wear leveling.
 *
 * Returns 0 on success, -EIO when no transfer unit can be prepared or no
 * data unit has any deleted blocks, or the error from copy_erase_unit().
 */
static int reclaim_block(partition_t *part)
{
    uint16_t i, eun, xfer;
    uint32_t best;
    int queued, ret;

    pr_debug("ftl_cs: reclaiming space...\n");
    pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
    /* Pick the least erased transfer unit */
    best = 0xffffffff; xfer = 0xffff;
    do {
	queued = 0;
	for (i = 0; i < part->header.NumTransferUnits; i++) {
	    int n=0;	/* set when the unit matched a known state */
	    /*
	     * Drive each transfer unit through its state machine:
	     * XFER_UNKNOWN -> erase_xfer() -> XFER_ERASING ->
	     * XFER_ERASED -> prepare_xfer() -> XFER_PREPARED.
	     * Only XFER_PREPARED units are reclaim candidates.
	     */
	    if (part->XferInfo[i].state == XFER_UNKNOWN) {
		pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
		n=1;
		erase_xfer(part, i);
	    }
	    if (part->XferInfo[i].state == XFER_ERASING) {
		pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
		n=1;
		queued = 1;	/* erase in flight; worth waiting for */
	    }
	    else if (part->XferInfo[i].state == XFER_ERASED) {
		pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
		n=1;
		prepare_xfer(part, i);
	    }
	    if (part->XferInfo[i].state == XFER_PREPARED) {
		pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
		n=1;
		/* "<=" (not "<") so later units win ties */
		if (part->XferInfo[i].EraseCount <= best) {
		    best = part->XferInfo[i].EraseCount;
		    xfer = i;
		}
	    }
		if (!n)
		    pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);

	}
	if (xfer == 0xffff) {
	    if (queued) {
		/* An erase is pending; flush and retry the scan. */
		pr_debug("ftl_cs: waiting for transfer "
			 "unit to be prepared...\n");
		mtd_sync(part->mbd.mtd);
	    } else {
		/* Rate-limit the failure message to the first few hits. */
		static int ne = 0;
		if (++ne < 5)
		    printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
			   "suitable transfer units!\n");
		else
		    pr_debug("ftl_cs: reclaim failed: no "
			     "suitable transfer units!\n");

		return -EIO;
	    }
	}
    } while (xfer == 0xffff);

    eun = 0;
    if ((jiffies % shuffle_freq) == 0) {
	/* Occasionally recycle the least-erased unit for wear leveling. */
	pr_debug("ftl_cs: recycling freshest block...\n");
	best = 0xffffffff;
	for (i = 0; i < part->DataUnits; i++)
	    if (part->EUNInfo[i].EraseCount <= best) {
		best = part->EUNInfo[i].EraseCount;
		eun = i;
	    }
    } else {
	/* Normal case: reclaim the unit with the most deleted blocks. */
	best = 0;
	for (i = 0; i < part->DataUnits; i++)
	    if (part->EUNInfo[i].Deleted >= best) {
		best = part->EUNInfo[i].Deleted;
		eun = i;
	    }
	if (best == 0) {
	    /* No unit has anything to reclaim; rate-limited message. */
	    static int ne = 0;
	    if (++ne < 5)
		printk(KERN_NOTICE "ftl_cs: reclaim failed: "
		       "no free blocks!\n");
	    else
		pr_debug("ftl_cs: reclaim failed: "
			 "no free blocks!\n");

	    return -EIO;
	}
    }
    ret = copy_erase_unit(part, eun, xfer);
    if (!ret)
	/* Source unit's old contents are now stale; queue its erase. */
	erase_xfer(part, xfer);
    else
	printk(KERN_NOTICE "ftl_cs: copy_erase_unit failed!\n");
    return ret;
} /* reclaim_block */
/*
 * Post-DSP-download ASI stream setup for the AE-7 card.
 *
 * This is a fixed sequence of opaque codec verbs, ca0113 MMIO writes and
 * chipio stream/connection programming (stream 0x18 is set to 6 channels
 * at 96 kHz and started).  The exact meaning of the raw IDs is not
 * visible here — presumably they mirror the vendor driver's init
 * sequence; the write order looks significant, so the whole block runs
 * under chipio_mutex with the *_no_mutex helper variants.
 */
static void ae7_post_dsp_asi_stream_setup(struct hda_codec *codec)
{
	struct ca0132_spec *spec = codec->spec;

	mutex_lock(&spec->chipio_mutex);

	snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x725, 0x81);
	ca0113_mmio_command_set(codec, 0x30, 0x2b, 0x00);

	chipio_set_conn_rate_no_mutex(codec, 0x70, SR_96_000);

	/* Route stream sources/destinations (raw port IDs). */
	chipio_set_stream_source_dest(codec, 0x05, 0x43, 0x00);
	chipio_set_stream_source_dest(codec, 0x18, 0x09, 0xd0);

	/* Stream 0x18: 96 kHz, 6 channels, then enable it. */
	chipio_set_conn_rate_no_mutex(codec, 0xd0, SR_96_000);
	chipio_set_stream_channels(codec, 0x18, 6);
	chipio_set_stream_control(codec, 0x18, 1);

	chipio_set_control_param_no_mutex(codec, CONTROL_PARAM_ASI, 4);

	mutex_unlock(&spec->chipio_mutex);
}
/*
 * Program the codec coefficients for the "nothing plugged" state of the
 * combo (headset) jack.  Each supported Realtek codec family has its own
 * coefficient table; the switch below selects it by vendor ID and writes
 * it out via alc_process_coef_fw().
 */
static void alc_headset_mode_unplugged(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;
	static const struct coef_fw coef0255[] = {
		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
		WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control (set to verb control) */
		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
		WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
		{}
	};
	static const struct coef_fw coef0256[] = {
		WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
		WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
		WRITE_COEFEX(0x57, 0x03, 0x09a3), /* Direct Drive HP Amp control */
		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control (set to verb control) */
		{}
	};
	static const struct coef_fw coef0233[] = {
		WRITE_COEF(0x1b, 0x0c0b),
		WRITE_COEF(0x45, 0xc429),
		UPDATE_COEF(0x35, 0x4000, 0),
		WRITE_COEF(0x06, 0x2104),
		WRITE_COEF(0x1a, 0x0001),
		WRITE_COEF(0x26, 0x0004),
		WRITE_COEF(0x32, 0x42a3),
		{}
	};
	static const struct coef_fw coef0288[] = {
		UPDATE_COEF(0x4f, 0xfcc0, 0xc400),
		UPDATE_COEF(0x50, 0x2000, 0x2000),
		UPDATE_COEF(0x56, 0x0006, 0x0006),
		UPDATE_COEF(0x66, 0x0008, 0),
		UPDATE_COEF(0x67, 0x2000, 0),
		{}
	};
	static const struct coef_fw coef0298[] = {
		UPDATE_COEF(0x19, 0x1300, 0x0300),
		{}
	};
	static const struct coef_fw coef0292[] = {
		WRITE_COEF(0x76, 0x000e),
		WRITE_COEF(0x6c, 0x2400),
		WRITE_COEF(0x18, 0x7308),
		WRITE_COEF(0x6b, 0xc429),
		{}
	};
	static const struct coef_fw coef0293[] = {
		UPDATE_COEF(0x10, 7<<8, 6<<8), /* SET Line1 JD to 0 */
		UPDATE_COEFEX(0x57, 0x05, 1<<15|1<<13, 0x0), /* SET charge pump by verb */
		UPDATE_COEFEX(0x57, 0x03, 1<<10, 1<<10), /* SET EN_OSW to 1 */
		UPDATE_COEF(0x1a, 1<<3, 1<<3), /* Combo JD gating with LINE1-VREFO */
		WRITE_COEF(0x45, 0xc429), /* Set to TRS type */
		UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */
		{}
	};
	static const struct coef_fw coef0668[] = {
		WRITE_COEF(0x15, 0x0d40),
		WRITE_COEF(0xb7, 0x802b),
		{}
	};
	static const struct coef_fw coef0225[] = {
		UPDATE_COEF(0x63, 3<<14, 0),
		{}
	};
	static const struct coef_fw coef0274[] = {
		UPDATE_COEF(0x4a, 0x0100, 0),
		UPDATE_COEFEX(0x57, 0x05, 0x4000, 0),
		UPDATE_COEF(0x6b, 0xf000, 0x5000),
		UPDATE_COEF(0x4a, 0x0010, 0),
		UPDATE_COEF(0x4a, 0x0c00, 0x0c00),
		WRITE_COEF(0x45, 0x5289),
		UPDATE_COEF(0x4a, 0x0c00, 0),
		{}
	};

	/* Boards with no internal mic: a single coef tweak and we're done. */
	if (spec->no_internal_mic_pin) {
		alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
		return;
	}

	switch (codec->core.vendor_id) {
	case 0x10ec0255:
		alc_process_coef_fw(codec, coef0255);
		break;
	case 0x10ec0230:
	case 0x10ec0236:
	case 0x10ec0256:
		alc_process_coef_fw(codec, coef0256);
		break;
	case 0x10ec0234:
	case 0x10ec0274:
	case 0x10ec0294:
		alc_process_coef_fw(codec, coef0274);
		break;
	case 0x10ec0233:
	case 0x10ec0283:
		alc_process_coef_fw(codec, coef0233);
		break;
	case 0x10ec0286:
	case 0x10ec0288:
		alc_process_coef_fw(codec, coef0288);
		break;
	case 0x10ec0298:
		/* ALC298 needs its own tweak followed by the 0288 table. */
		alc_process_coef_fw(codec, coef0298);
		alc_process_coef_fw(codec, coef0288);
		break;
	case 0x10ec0292:
		alc_process_coef_fw(codec, coef0292);
		break;
	case 0x10ec0293:
		alc_process_coef_fw(codec, coef0293);
		break;
	case 0x10ec0668:
		alc_process_coef_fw(codec, coef0668);
		break;
	case 0x10ec0215:
	case 0x10ec0225:
	case 0x10ec0285:
	case 0x10ec0295:
	case 0x10ec0289:
	case 0x10ec0299:
		/* ALC225 family: common pre-headset-mode table first. */
		alc_process_coef_fw(codec, alc225_pre_hsmode);
		alc_process_coef_fw(codec, coef0225);
		break;
	case 0x10ec0867:
		alc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);
		break;
	}
	codec_dbg(codec, "Headset jack set to unplugged mode.\n");
}
/**
 * HUF_buildTree():
 * Build the Huffman tree in place over huffNode[].
 *
 * On entry huffNode[0..maxSymbolValue] holds the per-symbol counts with
 * zero-count symbols at the tail (the initial scan below relies on this
 * — presumably the table was prepared by HUF_sort; confirm at call site).
 * Internal (parent) nodes are appended starting at index STARTNODE.
 *
 * @return nonNullRank: index of the last symbol with a non-zero count.
 *         After return, huffNode[n].nbBits holds each symbol's depth
 *         (unlimited tree height; any cap is enforced by the caller).
 */
static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
{
    nodeElt* const huffNode0 = huffNode - 1;   /* one-before base, so index 0 can act as a sentinel */
    int nonNullRank;
    int lowS, lowN;
    int nodeNb = STARTNODE;
    int n, nodeRoot;
    /* init for parents */
    nonNullRank = (int)maxSymbolValue;
    while(huffNode[nonNullRank].count == 0) nonNullRank--;
    /* Merge the two rarest symbols into the first internal node. */
    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
    nodeNb++; lowS-=2;
    /* Unbuilt internal nodes get a large count so they lose comparisons. */
    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
    huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */

    /* create parents:
     * classic two-queue merge — lowS walks down the sorted leaves,
     * lowN walks up the already-created internal nodes; each step takes
     * the two smallest counts overall.  The sentinel above keeps lowS
     * from underflowing without an explicit bounds check. */
    while (nodeNb <= nodeRoot) {
        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
        nodeNb++;
    }

    /* distribute weights (unlimited tree height):
     * depth of each node is its parent's depth + 1, walking top-down —
     * first the internal nodes, then the leaves. */
    huffNode[nodeRoot].nbBits = 0;
    for (n=nodeRoot-1; n>=STARTNODE; n--)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
    for (n=0; n<=nonNullRank; n++)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

    return nonNullRank;
}
false;\n\tconst u32 tZQCAL_lpddr4 = 1000000;\n\tconst bool opt_short_zcal = true;\n\tconst bool opt_do_sw_qrst = true;\n\tconst u32 opt_dvfs_mode = MAN_SR;\n\t/*\n\t * This is the timing table for the source frequency. It does _not_\n\t * necessarily correspond to the actual timing values in the EMC at the\n\t * moment. If the boot BCT differs from the table then this can happen.\n\t * However, we need it for accessing the dram_timings (which are not\n\t * really registers) array for the current frequency.\n\t */\n\tstruct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;\n\tu32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;\n\tu32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;\n\tu32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;\n\tu32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;\n\tu32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];\n\tu32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;\n\tbool opt_zcal_en_cc = false, is_lpddr3 = false;\n\tbool compensate_trimmer_applicable = false;\n\tu32 emc_dbg, emc_cfg_pipe_clk, emc_pin;\n\tu32 src_clk_period, dst_clk_period; /* in picoseconds */\n\tbool shared_zq_resistor = false;\n\tu32 value, dram_type;\n\tu32 opt_dll_mode = 0;\n\tunsigned long delay;\n\tunsigned int i;\n\n\temc_dbg(emc, INFO, \"Running clock change.\\n\");\n\n\t/* XXX fake == last */\n\tfake = tegra210_emc_find_timing(emc, last->rate * 1000UL);\n\tfsp_for_next_freq = !fsp_for_next_freq;\n\n\tvalue = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;\n\tdram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;\n\n\tif (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))\n\t\tshared_zq_resistor = true;\n\n\tif ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&\n\t last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||\n\t dram_type == DRAM_TYPE_LPDDR4)\n\t\topt_zcal_en_cc = true;\n\n\tif (dram_type == DRAM_TYPE_DDR3)\n\t\topt_dll_mode = 
tegra210_emc_get_dll_state(next);\n\n\tif ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&\n\t (dram_type == DRAM_TYPE_LPDDR2))\n\t\tis_lpddr3 = true;\n\n\temc_readl(emc, EMC_CFG);\n\temc_readl(emc, EMC_AUTO_CAL_CONFIG);\n\n\tsrc_clk_period = 1000000000 / last->rate;\n\tdst_clk_period = 1000000000 / next->rate;\n\n\tif (dst_clk_period <= zqcal_before_cc_cutoff)\n\t\ttZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;\n\telse\n\t\ttZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;\n\n\ttZQCAL_lpddr4_fc_adj /= dst_clk_period;\n\n\temc_dbg = emc_readl(emc, EMC_DBG);\n\temc_pin = emc_readl(emc, EMC_PIN);\n\temc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);\n\n\temc_cfg = next->burst_regs[EMC_CFG_INDEX];\n\temc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |\n\t\t EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);\n\temc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;\n\temc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |\n\t\t\t EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |\n\t\t\t EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |\n\t\t\t EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |\n\t\t\t EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);\n\n\temc_dbg(emc, INFO, \"Clock change version: %d\\n\",\n\t\tDVFS_CLOCK_CHANGE_VERSION);\n\temc_dbg(emc, INFO, \"DRAM type = %d\\n\", dram_type);\n\temc_dbg(emc, INFO, \"DRAM dev #: %u\\n\", emc->num_devices);\n\temc_dbg(emc, INFO, \"Next EMC clksrc: 0x%08x\\n\", clksrc);\n\temc_dbg(emc, INFO, \"DLL clksrc: 0x%08x\\n\", next->dll_clk_src);\n\temc_dbg(emc, INFO, \"last rate: %u, next rate %u\\n\", last->rate,\n\t\tnext->rate);\n\temc_dbg(emc, INFO, \"last period: %u, next period: %u\\n\",\n\t\tsrc_clk_period, dst_clk_period);\n\temc_dbg(emc, INFO, \" shared_zq_resistor: %d\\n\", !!shared_zq_resistor);\n\temc_dbg(emc, INFO, \" num_channels: %u\\n\", emc->num_channels);\n\temc_dbg(emc, INFO, \" opt_dll_mode: %d\\n\", opt_dll_mode);\n\n\t/*\n\t * Step 1:\n\t * Pre DVFS SW sequence.\n\t */\n\temc_dbg(emc, STEPS, \"Step 1\\n\");\n\temc_dbg(emc, STEPS, \"Step 1.1: Disable DLL 
temporarily.\\n\");\n\n\tvalue = emc_readl(emc, EMC_CFG_DIG_DLL);\n\tvalue &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;\n\temc_writel(emc, value, EMC_CFG_DIG_DLL);\n\n\ttegra210_emc_timing_update(emc);\n\n\tfor (i = 0; i < emc->num_channels; i++)\n\t\ttegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,\n\t\t\t\t\t EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);\n\n\temc_dbg(emc, STEPS, \"Step 1.2: Disable AUTOCAL temporarily.\\n\");\n\n\temc_auto_cal_config = next->emc_auto_cal_config;\n\tauto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;\n\temc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;\n\temc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;\n\temc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;\n\temc_auto_cal_config |= auto_cal_en;\n\temc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);\n\temc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */\n\n\temc_dbg(emc, STEPS, \"Step 1.3: Disable other power features.\\n\");\n\n\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\temc_writel(emc, emc_cfg, EMC_CFG);\n\temc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);\n\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\n\tif (next->periodic_training) {\n\t\ttegra210_emc_reset_dram_clktree_values(next);\n\n\t\tfor (i = 0; i < emc->num_channels; i++)\n\t\t\ttegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,\n\t\t\t\t\t\t EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,\n\t\t\t\t\t\t 0);\n\n\t\tfor (i = 0; i < emc->num_channels; i++)\n\t\t\ttegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,\n\t\t\t\t\t\t EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,\n\t\t\t\t\t\t 0);\n\n\t\ttegra210_emc_start_periodic_compensation(emc);\n\n\t\tdelay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);\n\t\tudelay((delay / last->rate) + 2);\n\n\t\tvalue = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,\n\t\t\t\t\t\t next);\n\t\tvalue = (value * 128 * next->rate / 1000) / 1000000;\n\n\t\tif (next->periodic_training && value > 
next->tree_margin)\n\t\t\tcompensate_trimmer_applicable = true;\n\t}\n\n\temc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);\n\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\temc_writel(emc, emc_cfg, EMC_CFG);\n\temc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);\n\temc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,\n\t\t EMC_CFG_PIPE_CLK);\n\temc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &\n\t\t\t~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,\n\t\t EMC_FDPD_CTRL_CMD_NO_RAMP);\n\n\tbg_reg_mode_change =\n\t\t((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^\n\t\t (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||\n\t\t((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^\n\t\t (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));\n\tenable_bglp_reg =\n\t\t(next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;\n\tenable_bg_reg =\n\t\t(next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;\n\n\tif (bg_reg_mode_change) {\n\t\tif (enable_bg_reg)\n\t\t\temc_writel(emc, last->burst_regs\n\t\t\t\t [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t\t\t ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,\n\t\t\t\t EMC_PMACRO_BG_BIAS_CTRL_0);\n\n\t\tif (enable_bglp_reg)\n\t\t\temc_writel(emc, last->burst_regs\n\t\t\t\t [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t\t\t ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,\n\t\t\t\t EMC_PMACRO_BG_BIAS_CTRL_0);\n\t}\n\n\t/* Check if we need to turn on VREF generator. 
*/\n\tif ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &\n\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&\n\t ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &\n\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 1)) ||\n\t (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &\n\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&\n\t ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &\n\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {\n\t\tu32 pad_tx_ctrl =\n\t\t next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];\n\t\tu32 last_pad_tx_ctrl =\n\t\t last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];\n\t\tu32 next_dq_e_ivref, next_dqs_e_ivref;\n\n\t\tnext_dqs_e_ivref = pad_tx_ctrl &\n\t\t\t\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;\n\t\tnext_dq_e_ivref = pad_tx_ctrl &\n\t\t\t\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;\n\t\tvalue = (last_pad_tx_ctrl &\n\t\t\t\t~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &\n\t\t\t\t~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |\n\t\t\tnext_dq_e_ivref | next_dqs_e_ivref;\n\t\temc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);\n\t\tudelay(1);\n\t} else if (bg_reg_mode_change) {\n\t\tudelay(1);\n\t}\n\n\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\n\t/*\n\t * Step 2:\n\t * Prelock the DLL.\n\t */\n\temc_dbg(emc, STEPS, \"Step 2\\n\");\n\n\tif (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &\n\t EMC_CFG_DIG_DLL_CFG_DLL_EN) {\n\t\temc_dbg(emc, INFO, \"Prelock enabled for target frequency.\\n\");\n\t\tvalue = tegra210_emc_dll_prelock(emc, clksrc);\n\t\temc_dbg(emc, INFO, \"DLL out: 0x%03x\\n\", value);\n\t} else {\n\t\temc_dbg(emc, INFO, \"Disabling DLL for target frequency.\\n\");\n\t\ttegra210_emc_dll_disable(emc);\n\t}\n\n\t/*\n\t * Step 3:\n\t * Prepare autocal for the clock change.\n\t */\n\temc_dbg(emc, STEPS, \"Step 3\\n\");\n\n\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\temc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);\n\temc_writel(emc, 
next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);\n\temc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);\n\temc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);\n\temc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);\n\temc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);\n\temc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);\n\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\n\temc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |\n\t\t\t\tauto_cal_en);\n\temc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);\n\n\t/*\n\t * Step 4:\n\t * Update EMC_CFG. (??)\n\t */\n\temc_dbg(emc, STEPS, \"Step 4\\n\");\n\n\tif (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)\n\t\tccfifo_writel(emc, 1, EMC_SELF_REF, 0);\n\telse\n\t\temc_writel(emc, next->emc_cfg_2, EMC_CFG_2);\n\n\t/*\n\t * Step 5:\n\t * Prepare reference variables for ZQCAL regs.\n\t */\n\temc_dbg(emc, STEPS, \"Step 5\\n\");\n\n\tif (dram_type == DRAM_TYPE_LPDDR4)\n\t\tzq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));\n\telse if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)\n\t\tzq_wait_long = max(next->min_mrs_wait,\n\t\t\t\t div_o3(360000, dst_clk_period)) + 4;\n\telse if (dram_type == DRAM_TYPE_DDR3)\n\t\tzq_wait_long = max((u32)256,\n\t\t\t\t div_o3(320000, dst_clk_period) + 2);\n\telse\n\t\tzq_wait_long = 0;\n\n\t/*\n\t * Step 6:\n\t * Training code - removed.\n\t */\n\temc_dbg(emc, STEPS, \"Step 6\\n\");\n\n\t/*\n\t * Step 7:\n\t * Program FSP reference registers and send MRWs to new FSPWR.\n\t */\n\temc_dbg(emc, STEPS, \"Step 7\\n\");\n\temc_dbg(emc, SUB_STEPS, \"Step 7.1: Bug 200024907 - Patch RP R2P\");\n\n\t/* WAR 200024907 */\n\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\tu32 nRTP = 16;\n\n\t\tif (src_clk_period >= 1000000 / 1866) /* 535.91 ps */\n\t\t\tnRTP = 14;\n\n\t\tif (src_clk_period >= 1000000 / 1600) /* 625.00 ps */\n\t\t\tnRTP = 12;\n\n\t\tif (src_clk_period >= 1000000 / 1333) /* 
750.19 ps */\n\t\t\tnRTP = 10;\n\n\t\tif (src_clk_period >= 1000000 / 1066) /* 938.09 ps */\n\t\t\tnRTP = 8;\n\n\t\tdeltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);\n\n\t\t/*\n\t\t * Originally there was a + .5 in the tRPST calculation.\n\t\t * However since we can't do FP in the kernel and the tRTM\n\t\t * computation was in a floating point ceiling function, adding\n\t\t * one to tRTP should be ok. There is no other source of non\n\t\t * integer values, so the result was always going to be\n\t\t * something for the form: f_ceil(N + .5) = N + 1;\n\t\t */\n\t\ttRPST = (last->emc_mrw & 0x80) >> 7;\n\t\ttRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +\n\t\t\tmax_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +\n\t\t\t1 + nRTP;\n\n\t\temc_dbg(emc, INFO, \"tRTM = %u, EMC_RP = %u\\n\", tRTM,\n\t\t\tnext->burst_regs[EMC_RP_INDEX]);\n\n\t\tif (last->burst_regs[EMC_RP_INDEX] < tRTM) {\n\t\t\tif (tRTM > (last->burst_regs[EMC_R2P_INDEX] +\n\t\t\t\t last->burst_regs[EMC_RP_INDEX])) {\n\t\t\t\tR2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];\n\t\t\t\tRP_war = last->burst_regs[EMC_RP_INDEX];\n\t\t\t\tTRPab_war = last->burst_regs[EMC_TRPAB_INDEX];\n\n\t\t\t\tif (R2P_war > 63) {\n\t\t\t\t\tRP_war = R2P_war +\n\t\t\t\t\t\t last->burst_regs[EMC_RP_INDEX] - 63;\n\n\t\t\t\t\tif (TRPab_war < RP_war)\n\t\t\t\t\t\tTRPab_war = RP_war;\n\n\t\t\t\t\tR2P_war = 63;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tR2P_war = last->burst_regs[EMC_R2P_INDEX];\n\t\t\t\tRP_war = last->burst_regs[EMC_RP_INDEX];\n\t\t\t\tTRPab_war = last->burst_regs[EMC_TRPAB_INDEX];\n\t\t\t}\n\n\t\t\tif (RP_war < deltaTWATM) {\n\t\t\t\tW2P_war = last->burst_regs[EMC_W2P_INDEX]\n\t\t\t\t\t + deltaTWATM - RP_war;\n\t\t\t\tif (W2P_war > 63) {\n\t\t\t\t\tRP_war = RP_war + W2P_war - 63;\n\t\t\t\t\tif (TRPab_war < RP_war)\n\t\t\t\t\t\tTRPab_war = RP_war;\n\t\t\t\t\tW2P_war = 63;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tW2P_war = last->burst_regs[\n\t\t\t\t\t EMC_W2P_INDEX];\n\t\t\t}\n\n\t\t\tif 
((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||\n\t\t\t (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||\n\t\t\t (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||\n\t\t\t (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {\n\t\t\t\temc_writel(emc, RP_war, EMC_RP);\n\t\t\t\temc_writel(emc, R2P_war, EMC_R2P);\n\t\t\t\temc_writel(emc, W2P_war, EMC_W2P);\n\t\t\t\temc_writel(emc, TRPab_war, EMC_TRPAB);\n\t\t\t}\n\n\t\t\ttegra210_emc_timing_update(emc);\n\t\t} else {\n\t\t\temc_dbg(emc, INFO, \"Skipped WAR\\n\");\n\t\t}\n\t}\n\n\tif (!fsp_for_next_freq) {\n\t\tmr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;\n\t\tmr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;\n\t} else {\n\t\tmr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;\n\t\tmr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;\n\t}\n\n\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\temc_writel(emc, mr13_flip_fspwr, EMC_MRW3);\n\t\temc_writel(emc, next->emc_mrw, EMC_MRW);\n\t\temc_writel(emc, next->emc_mrw2, EMC_MRW2);\n\t}\n\n\t/*\n\t * Step 8:\n\t * Program the shadow registers.\n\t */\n\temc_dbg(emc, STEPS, \"Step 8\\n\");\n\temc_dbg(emc, SUB_STEPS, \"Writing burst_regs\\n\");\n\n\tfor (i = 0; i < next->num_burst; i++) {\n\t\tconst u16 *offsets = emc->offsets->burst;\n\t\tu16 offset;\n\n\t\tif (!offsets[i])\n\t\t\tcontinue;\n\n\t\tvalue = next->burst_regs[i];\n\t\toffset = offsets[i];\n\n\t\tif (dram_type != DRAM_TYPE_LPDDR4 &&\n\t\t (offset == EMC_MRW6 || offset == EMC_MRW7 ||\n\t\t offset == EMC_MRW8 || offset == EMC_MRW9 ||\n\t\t offset == EMC_MRW10 || offset == EMC_MRW11 ||\n\t\t offset == EMC_MRW12 || offset == EMC_MRW13 ||\n\t\t offset == EMC_MRW14 || offset == EMC_MRW15 ||\n\t\t offset == EMC_TRAINING_CTRL))\n\t\t\tcontinue;\n\n\t\t/* Pain... And suffering. 
*/\n\t\tif (offset == EMC_CFG) {\n\t\t\tvalue &= ~EMC_CFG_DRAM_ACPD;\n\t\t\tvalue &= ~EMC_CFG_DYN_SELF_REF;\n\n\t\t\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\t\t\tvalue &= ~EMC_CFG_DRAM_CLKSTOP_SR;\n\t\t\t\tvalue &= ~EMC_CFG_DRAM_CLKSTOP_PD;\n\t\t\t}\n\t\t} else if (offset == EMC_MRS_WAIT_CNT &&\n\t\t\t dram_type == DRAM_TYPE_LPDDR2 &&\n\t\t\t opt_zcal_en_cc && !opt_cc_short_zcal &&\n\t\t\t opt_short_zcal) {\n\t\t\tvalue = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<\n\t\t\t\t\t EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |\n\t\t\t\t((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<\n\t\t\t\t\t\t EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);\n\t\t} else if (offset == EMC_ZCAL_WAIT_CNT &&\n\t\t\t dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&\n\t\t\t !opt_cc_short_zcal && opt_short_zcal) {\n\t\t\tvalue = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<\n\t\t\t\t\t EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |\n\t\t\t\t((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<\n\t\t\t\t\t\t EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);\n\t\t} else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {\n\t\t\tvalue = 0; /* EMC_ZCAL_INTERVAL reset value. 
*/\n\t\t} else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {\n\t\t\tvalue |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;\n\t\t} else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {\n\t\t\tvalue &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |\n\t\t\t\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |\n\t\t\t\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |\n\t\t\t\t EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);\n\t\t} else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {\n\t\t\tvalue |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;\n\t\t\tvalue &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |\n\t\t\t\t EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |\n\t\t\t\t EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |\n\t\t\t\t EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);\n\t\t} else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {\n\t\t\tvalue &= 0xf800f800;\n\t\t} else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {\n\t\t\tvalue &= 0xfffffff0;\n\t\t}\n\n\t\temc_writel(emc, value, offset);\n\t}\n\n\t/* SW addition: do EMC refresh adjustment here. */\n\ttegra210_emc_adjust_timing(emc, next);\n\n\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\tvalue = (23 << EMC_MRW_MRW_MA_SHIFT) |\n\t\t\t(next->run_clocks & EMC_MRW_MRW_OP_MASK);\n\t\temc_writel(emc, value, EMC_MRW);\n\t}\n\n\t/* Per channel burst registers. 
*/\n\temc_dbg(emc, SUB_STEPS, \"Writing burst_regs_per_ch\\n\");\n\n\tfor (i = 0; i < next->num_burst_per_ch; i++) {\n\t\tconst struct tegra210_emc_per_channel_regs *burst =\n\t\t\t\temc->offsets->burst_per_channel;\n\n\t\tif (!burst[i].offset)\n\t\t\tcontinue;\n\n\t\tif (dram_type != DRAM_TYPE_LPDDR4 &&\n\t\t (burst[i].offset == EMC_MRW6 ||\n\t\t burst[i].offset == EMC_MRW7 ||\n\t\t burst[i].offset == EMC_MRW8 ||\n\t\t burst[i].offset == EMC_MRW9 ||\n\t\t burst[i].offset == EMC_MRW10 ||\n\t\t burst[i].offset == EMC_MRW11 ||\n\t\t burst[i].offset == EMC_MRW12 ||\n\t\t burst[i].offset == EMC_MRW13 ||\n\t\t burst[i].offset == EMC_MRW14 ||\n\t\t burst[i].offset == EMC_MRW15))\n\t\t\tcontinue;\n\n\t\t/* Filter out second channel if not in DUAL_CHANNEL mode. */\n\t\tif (emc->num_channels < 2 && burst[i].bank >= 1)\n\t\t\tcontinue;\n\n\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\tnext->burst_reg_per_ch[i], burst[i].offset);\n\t\temc_channel_writel(emc, burst[i].bank,\n\t\t\t\t next->burst_reg_per_ch[i],\n\t\t\t\t burst[i].offset);\n\t}\n\n\t/* Vref regs. */\n\temc_dbg(emc, SUB_STEPS, \"Writing vref_regs\\n\");\n\n\tfor (i = 0; i < next->vref_num; i++) {\n\t\tconst struct tegra210_emc_per_channel_regs *vref =\n\t\t\t\t\temc->offsets->vref_per_channel;\n\n\t\tif (!vref[i].offset)\n\t\t\tcontinue;\n\n\t\tif (emc->num_channels < 2 && vref[i].bank >= 1)\n\t\t\tcontinue;\n\n\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\tnext->vref_perch_regs[i], vref[i].offset);\n\t\temc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],\n\t\t\t\t vref[i].offset);\n\t}\n\n\t/* Trimmers. 
*/\n\temc_dbg(emc, SUB_STEPS, \"Writing trim_regs\\n\");\n\n\tfor (i = 0; i < next->num_trim; i++) {\n\t\tconst u16 *offsets = emc->offsets->trim;\n\n\t\tif (!offsets[i])\n\t\t\tcontinue;\n\n\t\tif (compensate_trimmer_applicable &&\n\t\t (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||\n\t\t offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||\n\t\t offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||\n\t\t offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||\n\t\t offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||\n\t\t offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||\n\t\t offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||\n\t\t offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||\n\t\t offsets[i] == EMC_DATA_BRLSHFT_0 ||\n\t\t offsets[i] == EMC_DATA_BRLSHFT_1)) {\n\t\t\tvalue = tegra210_emc_compensate(next, offsets[i]);\n\t\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\t\tvalue, offsets[i]);\n\t\t\temc_dbg(emc, EMA_WRITES, \"0x%08x <= 0x%08x\\n\",\n\t\t\t\t(u32)(u64)offsets[i], value);\n\t\t\temc_writel(emc, value, offsets[i]);\n\t\t} else {\n\t\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\t\tnext->trim_regs[i], offsets[i]);\n\t\t\temc_writel(emc, next->trim_regs[i], offsets[i]);\n\t\t}\n\t}\n\n\t/* Per channel trimmers. 
*/\n\temc_dbg(emc, SUB_STEPS, \"Writing trim_regs_per_ch\\n\");\n\n\tfor (i = 0; i < next->num_trim_per_ch; i++) {\n\t\tconst struct tegra210_emc_per_channel_regs *trim =\n\t\t\t\t&emc->offsets->trim_per_channel[0];\n\t\tunsigned int offset;\n\n\t\tif (!trim[i].offset)\n\t\t\tcontinue;\n\n\t\tif (emc->num_channels < 2 && trim[i].bank >= 1)\n\t\t\tcontinue;\n\n\t\toffset = trim[i].offset;\n\n\t\tif (compensate_trimmer_applicable &&\n\t\t (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||\n\t\t offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||\n\t\t offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||\n\t\t offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||\n\t\t offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||\n\t\t offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||\n\t\t offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||\n\t\t offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||\n\t\t offset == EMC_DATA_BRLSHFT_0 ||\n\t\t offset == EMC_DATA_BRLSHFT_1)) {\n\t\t\tvalue = tegra210_emc_compensate(next, offset);\n\t\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\t\tvalue, offset);\n\t\t\temc_dbg(emc, EMA_WRITES, \"0x%08x <= 0x%08x\\n\", offset,\n\t\t\t\tvalue);\n\t\t\temc_channel_writel(emc, trim[i].bank, value, offset);\n\t\t} else {\n\t\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\t\tnext->trim_perch_regs[i], offset);\n\t\t\temc_channel_writel(emc, trim[i].bank,\n\t\t\t\t\t next->trim_perch_regs[i], offset);\n\t\t}\n\t}\n\n\temc_dbg(emc, SUB_STEPS, \"Writing burst_mc_regs\\n\");\n\n\tfor (i = 0; i < next->num_mc_regs; i++) {\n\t\tconst u16 *offsets = emc->offsets->burst_mc;\n\t\tu32 *values = next->burst_mc_regs;\n\n\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\tvalues[i], offsets[i]);\n\t\tmc_writel(emc->mc, values[i], offsets[i]);\n\t}\n\n\t/* Registers to be programmed on the faster clock. 
*/\n\tif (next->rate < last->rate) {\n\t\tconst u16 *la = emc->offsets->la_scale;\n\n\t\temc_dbg(emc, SUB_STEPS, \"Writing la_scale_regs\\n\");\n\n\t\tfor (i = 0; i < next->num_up_down; i++) {\n\t\t\temc_dbg(emc, REG_LISTS, \"(%u) 0x%08x => 0x%08x\\n\", i,\n\t\t\t\tnext->la_scale_regs[i], la[i]);\n\t\t\tmc_writel(emc->mc, next->la_scale_regs[i], la[i]);\n\t\t}\n\t}\n\n\t/* Flush all the burst register writes. */\n\tmc_readl(emc->mc, MC_EMEM_ADR_CFG);\n\n\t/*\n\t * Step 9:\n\t * LPDDR4 section A.\n\t */\n\temc_dbg(emc, STEPS, \"Step 9\\n\");\n\n\tvalue = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];\n\tvalue &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;\n\n\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\temc_writel(emc, 0, EMC_ZCAL_INTERVAL);\n\t\temc_writel(emc, value, EMC_ZCAL_WAIT_CNT);\n\n\t\tvalue = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |\n\t\t\t\t EMC_DBG_WRITE_ACTIVE_ONLY);\n\n\t\temc_writel(emc, value, EMC_DBG);\n\t\temc_writel(emc, 0, EMC_ZCAL_INTERVAL);\n\t\temc_writel(emc, emc_dbg, EMC_DBG);\n\t}\n\n\t/*\n\t * Step 10:\n\t * LPDDR4 and DDR3 common section.\n\t */\n\temc_dbg(emc, STEPS, \"Step 10\\n\");\n\n\tif (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {\n\t\tif (dram_type == DRAM_TYPE_LPDDR4)\n\t\t\tccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);\n\t\telse\n\t\t\tccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);\n\n\t\tif (dram_type == DRAM_TYPE_LPDDR4 &&\n\t\t dst_clk_period <= zqcal_before_cc_cutoff) {\n\t\t\tccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);\n\t\t\tccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &\n\t\t\t\t\t\t0xFFFF3F3F) |\n\t\t\t\t\t (last->burst_regs[EMC_MRW6_INDEX] &\n\t\t\t\t\t\t0x0000C0C0), EMC_MRW6, 0);\n\t\t\tccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &\n\t\t\t\t\t\t0xFFFF0707) |\n\t\t\t\t\t (last->burst_regs[EMC_MRW14_INDEX] &\n\t\t\t\t\t\t0x00003838), EMC_MRW14, 0);\n\n\t\t\tif (emc->num_devices > 1) {\n\t\t\t\tccfifo_writel(emc,\n\t\t\t\t (next->burst_regs[EMC_MRW7_INDEX] &\n\t\t\t\t 0xFFFF3F3F) |\n\t\t\t\t 
(last->burst_regs[EMC_MRW7_INDEX] &\n\t\t\t\t 0x0000C0C0), EMC_MRW7, 0);\n\t\t\t\tccfifo_writel(emc,\n\t\t\t\t (next->burst_regs[EMC_MRW15_INDEX] &\n\t\t\t\t 0xFFFF0707) |\n\t\t\t\t (last->burst_regs[EMC_MRW15_INDEX] &\n\t\t\t\t 0x00003838), EMC_MRW15, 0);\n\t\t\t}\n\n\t\t\tif (opt_zcal_en_cc) {\n\t\t\t\tif (emc->num_devices < 2)\n\t\t\t\t\tccfifo_writel(emc,\n\t\t\t\t\t\t2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT\n\t\t\t\t\t\t| EMC_ZQ_CAL_ZQ_CAL_CMD,\n\t\t\t\t\t\tEMC_ZQ_CAL, 0);\n\t\t\t\telse if (shared_zq_resistor)\n\t\t\t\t\tccfifo_writel(emc,\n\t\t\t\t\t\t2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT\n\t\t\t\t\t\t| EMC_ZQ_CAL_ZQ_CAL_CMD,\n\t\t\t\t\t\tEMC_ZQ_CAL, 0);\n\t\t\t\telse\n\t\t\t\t\tccfifo_writel(emc,\n\t\t\t\t\t\t EMC_ZQ_CAL_ZQ_CAL_CMD,\n\t\t\t\t\t\t EMC_ZQ_CAL, 0);\n\t\t\t}\n\t\t}\n\t}\n\n\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\tvalue = (1000 * fake->dram_timings[T_RP]) / src_clk_period;\n\t\tccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);\n\t\tccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);\n\t}\n\n\tif (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {\n\t\tdelay = 30;\n\n\t\tif (cya_allow_ref_cc) {\n\t\t\tdelay += (1000 * fake->dram_timings[T_RP]) /\n\t\t\t\t\tsrc_clk_period;\n\t\t\tdelay += 4000 * fake->dram_timings[T_RFC];\n\t\t}\n\n\t\tccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |\n\t\t\t\t\t EMC_PIN_PIN_CKEB |\n\t\t\t\t\t EMC_PIN_PIN_CKE),\n\t\t\t EMC_PIN, delay);\n\t}\n\n\t/* calculate reference delay multiplier */\n\tvalue = 1;\n\n\tif (ref_b4_sref_en)\n\t\tvalue++;\n\n\tif (cya_allow_ref_cc)\n\t\tvalue++;\n\n\tif (cya_issue_pc_ref)\n\t\tvalue++;\n\n\tif (dram_type != DRAM_TYPE_LPDDR4) {\n\t\tdelay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +\n\t\t\t (1000 * fake->dram_timings[T_RFC] / src_clk_period));\n\t\tdelay = value * delay + 20;\n\t} else {\n\t\tdelay = 0;\n\t}\n\n\t/*\n\t * Step 11:\n\t * Ramp down.\n\t */\n\temc_dbg(emc, STEPS, \"Step 11\\n\");\n\n\tccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);\n\n\tvalue 
= emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;\n\tccfifo_writel(emc, value, EMC_DBG, 0);\n\n\tramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,\n\t\t\t\t\t\t\t 0);\n\n\t/*\n\t * Step 12:\n\t * And finally - trigger the clock change.\n\t */\n\temc_dbg(emc, STEPS, \"Step 12\\n\");\n\n\tccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);\n\tvalue &= ~EMC_DBG_WRITE_ACTIVE_ONLY;\n\tccfifo_writel(emc, value, EMC_DBG, 0);\n\n\t/*\n\t * Step 13:\n\t * Ramp up.\n\t */\n\temc_dbg(emc, STEPS, \"Step 13\\n\");\n\n\tramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);\n\tccfifo_writel(emc, emc_dbg, EMC_DBG, 0);\n\n\t/*\n\t * Step 14:\n\t * Bringup CKE pins.\n\t */\n\temc_dbg(emc, STEPS, \"Step 14\\n\");\n\n\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\tvalue = emc_pin | EMC_PIN_PIN_CKE;\n\n\t\tif (emc->num_devices <= 1)\n\t\t\tvalue &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);\n\t\telse\n\t\t\tvalue |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;\n\n\t\tccfifo_writel(emc, value, EMC_PIN, 0);\n\t}\n\n\t/*\n\t * Step 15: (two step 15s ??)\n\t * Calculate zqlatch wait time; has dependency on ramping times.\n\t */\n\temc_dbg(emc, STEPS, \"Step 15\\n\");\n\n\tif (dst_clk_period <= zqcal_before_cc_cutoff) {\n\t\ts32 t = (s32)(ramp_up_wait + ramp_down_wait) /\n\t\t\t(s32)dst_clk_period;\n\t\tzq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;\n\t} else {\n\t\tzq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -\n\t\t\tdiv_o3(1000 * next->dram_timings[T_PDEX],\n\t\t\t dst_clk_period);\n\t}\n\n\temc_dbg(emc, INFO, \"tZQCAL_lpddr4_fc_adj = %u\\n\", tZQCAL_lpddr4_fc_adj);\n\temc_dbg(emc, INFO, \"dst_clk_period = %u\\n\",\n\t\tdst_clk_period);\n\temc_dbg(emc, INFO, \"next->dram_timings[T_PDEX] = %u\\n\",\n\t\tnext->dram_timings[T_PDEX]);\n\temc_dbg(emc, INFO, \"zq_latch_dvfs_wait_time = %d\\n\",\n\t\tmax_t(s32, 0, zq_latch_dvfs_wait_time));\n\n\tif (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {\n\t\tdelay = 
div_o3(1000 * next->dram_timings[T_PDEX],\n\t\t\t dst_clk_period);\n\n\t\tif (emc->num_devices < 2) {\n\t\t\tif (dst_clk_period > zqcal_before_cc_cutoff)\n\t\t\t\tccfifo_writel(emc,\n\t\t\t\t\t 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t\t EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,\n\t\t\t\t\t delay);\n\n\t\t\tvalue = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;\n\t\t\tccfifo_writel(emc, value, EMC_MRW3, delay);\n\t\t\tccfifo_writel(emc, 0, EMC_SELF_REF, 0);\n\t\t\tccfifo_writel(emc, 0, EMC_REF, 0);\n\t\t\tccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t EMC_ZQ_CAL_ZQ_LATCH_CMD,\n\t\t\t\t EMC_ZQ_CAL,\n\t\t\t\t max_t(s32, 0, zq_latch_dvfs_wait_time));\n\t\t} else if (shared_zq_resistor) {\n\t\t\tif (dst_clk_period > zqcal_before_cc_cutoff)\n\t\t\t\tccfifo_writel(emc,\n\t\t\t\t\t 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t\t EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,\n\t\t\t\t\t delay);\n\n\t\t\tccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,\n\t\t\t\t max_t(s32, 0, zq_latch_dvfs_wait_time) +\n\t\t\t\t\tdelay);\n\t\t\tccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t EMC_ZQ_CAL_ZQ_LATCH_CMD,\n\t\t\t\t EMC_ZQ_CAL, 0);\n\n\t\t\tvalue = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;\n\t\t\tccfifo_writel(emc, value, EMC_MRW3, 0);\n\t\t\tccfifo_writel(emc, 0, EMC_SELF_REF, 0);\n\t\t\tccfifo_writel(emc, 0, EMC_REF, 0);\n\n\t\t\tccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,\n\t\t\t\t tZQCAL_lpddr4 / dst_clk_period);\n\t\t} else {\n\t\t\tif (dst_clk_period > zqcal_before_cc_cutoff)\n\t\t\t\tccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,\n\t\t\t\t\t EMC_ZQ_CAL, delay);\n\n\t\t\tvalue = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;\n\t\t\tccfifo_writel(emc, value, EMC_MRW3, delay);\n\t\t\tccfifo_writel(emc, 0, EMC_SELF_REF, 0);\n\t\t\tccfifo_writel(emc, 0, EMC_REF, 0);\n\n\t\t\tccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,\n\t\t\t\t max_t(s32, 0, 
zq_latch_dvfs_wait_time));\n\t\t}\n\t}\n\n\t/* WAR: delay for zqlatch */\n\tccfifo_writel(emc, 0, 0, 10);\n\n\t/*\n\t * Step 16:\n\t * LPDDR4 Conditional Training Kickoff. Removed.\n\t */\n\n\t/*\n\t * Step 17:\n\t * MANSR exit self refresh.\n\t */\n\temc_dbg(emc, STEPS, \"Step 17\\n\");\n\n\tif (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)\n\t\tccfifo_writel(emc, 0, EMC_SELF_REF, 0);\n\n\t/*\n\t * Step 18:\n\t * Send MRWs to LPDDR3/DDR3.\n\t */\n\temc_dbg(emc, STEPS, \"Step 18\\n\");\n\n\tif (dram_type == DRAM_TYPE_LPDDR2) {\n\t\tccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);\n\t\tccfifo_writel(emc, next->emc_mrw, EMC_MRW, 0);\n\t\tif (is_lpddr3)\n\t\t\tccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);\n\t} else if (dram_type == DRAM_TYPE_DDR3) {\n\t\tif (opt_dll_mode)\n\t\t\tccfifo_writel(emc, next->emc_emrs &\n\t\t\t\t ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);\n\t\tccfifo_writel(emc, next->emc_emrs2 &\n\t\t\t ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);\n\t\tccfifo_writel(emc, next->emc_mrs |\n\t\t\t EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);\n\t}\n\n\t/*\n\t * Step 19:\n\t * ZQCAL for LPDDR3/DDR3\n\t */\n\temc_dbg(emc, STEPS, \"Step 19\\n\");\n\n\tif (opt_zcal_en_cc) {\n\t\tif (dram_type == DRAM_TYPE_LPDDR2) {\n\t\t\tvalue = opt_cc_short_zcal ? 90000 : 360000;\n\t\t\tvalue = div_o3(value, dst_clk_period);\n\t\t\tvalue = value <<\n\t\t\t\tEMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |\n\t\t\t\tvalue <<\n\t\t\t\tEMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;\n\t\t\tccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);\n\n\t\t\tvalue = opt_cc_short_zcal ? 
0x56 : 0xab;\n\t\t\tccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |\n\t\t\t\t\t EMC_MRW_USE_MRW_EXT_CNT |\n\t\t\t\t\t 10 << EMC_MRW_MRW_MA_SHIFT |\n\t\t\t\t\t value << EMC_MRW_MRW_OP_SHIFT,\n\t\t\t\t EMC_MRW, 0);\n\n\t\t\tif (emc->num_devices > 1) {\n\t\t\t\tvalue = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |\n\t\t\t\t\tEMC_MRW_USE_MRW_EXT_CNT |\n\t\t\t\t\t10 << EMC_MRW_MRW_MA_SHIFT |\n\t\t\t\t\tvalue << EMC_MRW_MRW_OP_SHIFT;\n\t\t\t\tccfifo_writel(emc, value, EMC_MRW, 0);\n\t\t\t}\n\t\t} else if (dram_type == DRAM_TYPE_DDR3) {\n\t\t\tvalue = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;\n\n\t\t\tccfifo_writel(emc, value |\n\t\t\t\t\t 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t\t EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,\n\t\t\t\t\t 0);\n\n\t\t\tif (emc->num_devices > 1) {\n\t\t\t\tvalue = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |\n\t\t\t\t\t\tEMC_ZQ_CAL_ZQ_CAL_CMD;\n\t\t\t\tccfifo_writel(emc, value, EMC_ZQ_CAL, 0);\n\t\t\t}\n\t\t}\n\t}\n\n\tif (bg_reg_mode_change) {\n\t\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\n\t\tif (ramp_up_wait <= 1250000)\n\t\t\tdelay = (1250000 - ramp_up_wait) / dst_clk_period;\n\t\telse\n\t\t\tdelay = 0;\n\n\t\tccfifo_writel(emc,\n\t\t\t next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],\n\t\t\t EMC_PMACRO_BG_BIAS_CTRL_0, delay);\n\t\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\t}\n\n\t/*\n\t * Step 20:\n\t * Issue ref and optional QRST.\n\t */\n\temc_dbg(emc, STEPS, \"Step 20\\n\");\n\n\tif (dram_type != DRAM_TYPE_LPDDR4)\n\t\tccfifo_writel(emc, 0, EMC_REF, 0);\n\n\tif (opt_do_sw_qrst) {\n\t\tccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);\n\t\tccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);\n\t}\n\n\t/*\n\t * Step 21:\n\t * Restore ZCAL and ZCAL interval.\n\t */\n\temc_dbg(emc, STEPS, \"Step 21\\n\");\n\n\tif (save_restore_clkstop_pd || opt_zcal_en_cc) {\n\t\tccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,\n\t\t\t EMC_DBG, 0);\n\t\tif (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)\n\t\t\tccfifo_writel(emc, 
next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],\n\t\t\t\t EMC_ZCAL_INTERVAL, 0);\n\n\t\tif (save_restore_clkstop_pd)\n\t\t\tccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &\n\t\t\t\t\t\t~EMC_CFG_DYN_SELF_REF,\n\t\t\t\t EMC_CFG, 0);\n\t\tccfifo_writel(emc, emc_dbg, EMC_DBG, 0);\n\t}\n\n\t/*\n\t * Step 22:\n\t * Restore EMC_CFG_PIPE_CLK.\n\t */\n\temc_dbg(emc, STEPS, \"Step 22\\n\");\n\n\tccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);\n\n\tif (bg_reg_mode_change) {\n\t\tif (enable_bg_reg)\n\t\t\temc_writel(emc,\n\t\t\t\t next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t\t\t\t~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,\n\t\t\t\t EMC_PMACRO_BG_BIAS_CTRL_0);\n\t\telse\n\t\t\temc_writel(emc,\n\t\t\t\t next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &\n\t\t\t\t\t~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,\n\t\t\t\t EMC_PMACRO_BG_BIAS_CTRL_0);\n\t}\n\n\t/*\n\t * Step 23:\n\t */\n\temc_dbg(emc, STEPS, \"Step 23\\n\");\n\n\tvalue = emc_readl(emc, EMC_CFG_DIG_DLL);\n\tvalue |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;\n\tvalue &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;\n\tvalue &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;\n\tvalue &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;\n\tvalue = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |\n\t\t(2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);\n\temc_writel(emc, value, EMC_CFG_DIG_DLL);\n\n\ttegra210_emc_do_clock_change(emc, clksrc);\n\n\t/*\n\t * Step 24:\n\t * Save training results. 
Removed.\n\t */\n\n\t/*\n\t * Step 25:\n\t * Program MC updown registers.\n\t */\n\temc_dbg(emc, STEPS, \"Step 25\\n\");\n\n\tif (next->rate > last->rate) {\n\t\tfor (i = 0; i < next->num_up_down; i++)\n\t\t\tmc_writel(emc->mc, next->la_scale_regs[i],\n\t\t\t\t emc->offsets->la_scale[i]);\n\n\t\ttegra210_emc_timing_update(emc);\n\t}\n\n\t/*\n\t * Step 26:\n\t * Restore ZCAL registers.\n\t */\n\temc_dbg(emc, STEPS, \"Step 26\\n\");\n\n\tif (dram_type == DRAM_TYPE_LPDDR4) {\n\t\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\t\temc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],\n\t\t\t EMC_ZCAL_WAIT_CNT);\n\t\temc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],\n\t\t\t EMC_ZCAL_INTERVAL);\n\t\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\t}\n\n\tif (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&\n\t !opt_short_zcal && opt_cc_short_zcal) {\n\t\tudelay(2);\n\n\t\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\t\tif (dram_type == DRAM_TYPE_LPDDR2)\n\t\t\temc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],\n\t\t\t\t EMC_MRS_WAIT_CNT);\n\t\telse if (dram_type == DRAM_TYPE_DDR3)\n\t\t\temc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],\n\t\t\t\t EMC_ZCAL_WAIT_CNT);\n\t\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\t}\n\n\t/*\n\t * Step 27:\n\t * Restore EMC_CFG, FDPD registers.\n\t */\n\temc_dbg(emc, STEPS, \"Step 27\\n\");\n\n\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\temc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);\n\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\temc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,\n\t\t EMC_FDPD_CTRL_CMD_NO_RAMP);\n\temc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);\n\n\t/*\n\t * Step 28:\n\t * Training recover. 
Removed.\n\t */\n\temc_dbg(emc, STEPS, \"Step 28\\n\");\n\n\ttegra210_emc_set_shadow_bypass(emc, ACTIVE);\n\temc_writel(emc,\n\t\t next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],\n\t\t EMC_PMACRO_AUTOCAL_CFG_COMMON);\n\ttegra210_emc_set_shadow_bypass(emc, ASSEMBLY);\n\n\t/*\n\t * Step 29:\n\t * Power fix WAR.\n\t */\n\temc_dbg(emc, STEPS, \"Step 29\\n\");\n\n\temc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,\n\t\t EMC_PMACRO_CFG_PM_GLOBAL_0);\n\temc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,\n\t\t EMC_PMACRO_TRAINING_CTRL_0);\n\temc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,\n\t\t EMC_PMACRO_TRAINING_CTRL_1);\n\temc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);\n\n\t/*\n\t * Step 30:\n\t * Re-enable autocal.\n\t */\n\temc_dbg(emc, STEPS, \"Step 30: Re-enable DLL and AUTOCAL\\n\");\n\n\tif (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {\n\t\tvalue = emc_readl(emc, EMC_CFG_DIG_DLL);\n\t\tvalue |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;\n\t\tvalue |= EMC_CFG_DIG_DLL_CFG_DLL_EN;\n\t\tvalue &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;\n\t\tvalue &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;\n\t\tvalue = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |\n\t\t\t(2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);\n\t\temc_writel(emc, value, EMC_CFG_DIG_DLL);\n\t\ttegra210_emc_timing_update(emc);\n\t}\n\n\temc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);\n\n\t/* Done! Yay. 
*/\n}", "static void update_rq_clock_task(struct rq *rq, s64 delta)\n{\n/*\n * In theory, the compile should just see 0 here, and optimize out the call\n * to sched_rt_avg_update. But I don't trust it...": "static void update_rq_clock_task(struct rq *rq, s64 delta)\n{\n/*\n * In theory, the compile should just see 0 here, and optimize out the call\n * to sched_rt_avg_update. But I don't trust it...\n */\n\ts64 __maybe_unused steal = 0, irq_delta = 0;\n\n#ifdef CONFIG_IRQ_TIME_ACCOUNTING\n\tirq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;\n\n\t/*\n\t * Since irq_time is only updated on {soft,}irq_exit, we might run into\n\t * this case when a previous update_rq_clock() happened inside a\n\t * {soft,}irq region.\n\t *\n\t * When this happens, we stop ->clock_task and only update the\n\t * prev_irq_time stamp to account for the part that fit, so that a next\n\t * update will consume the rest. This ensures ->clock_task is\n\t * monotonic.\n\t *\n\t * It does however cause some slight miss-attribution of {soft,}irq\n\t * time, a more accurate solution would be to update the irq_time using\n\t * the current rq->clock timestamp, except that would require using\n\t * atomic ops.\n\t */\n\tif (irq_delta > delta)\n\t\tirq_delta = delta;\n\n\trq->prev_irq_time += irq_delta;\n\tdelta -= irq_delta;\n#endif\n#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING\n\tif (static_key_false((¶virt_steal_rq_enabled))) {\n\t\tsteal = paravirt_steal_clock(cpu_of(rq));\n\t\tsteal -= rq->prev_steal_time_rq;\n\n\t\tif (unlikely(steal > delta))\n\t\t\tsteal = delta;\n\n\t\trq->prev_steal_time_rq += steal;\n\t\tdelta -= steal;\n\t}\n#endif\n\n\trq->clock_task += delta;\n\n#ifdef CONFIG_HAVE_SCHED_AVG_IRQ\n\tif ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))\n\t\tupdate_irq_load_avg(rq, irq_delta + steal);\n#endif\n\tupdate_rq_clock_pelt(rq, delta);\n}", "static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)\n{\n\t/* Everything in here is from the Windows driver */\n/* define 
for compgain calculation */\n#if 0": "static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)\n{\n\t/* Everything in here is from the Windows driver */\n/* define for compgain calculation */\n#if 0\n#define COMPGAIN(base, curexp, newexp) \\\n (u8) ((((float) base - 128.0) * ((float) curexp / (float) newexp)) + 128.5)\n#define EXP_FROM_COMP(basecomp, curcomp, curexp) \\\n (u16)((float)curexp * (float)(u8)(curcomp + 128) / \\\n (float)(u8)(basecomp - 128))\n#else\n /* equivalent functions without floating point math */\n#define COMPGAIN(base, curexp, newexp) \\\n (u8)(128 + (((u32)(2*(base-128)*curexp + newexp)) / (2 * newexp)))\n#define EXP_FROM_COMP(basecomp, curcomp, curexp) \\\n (u16)(((u32)(curexp * (u8)(curcomp + 128)) / (u8)(basecomp - 128)))\n#endif\n\n\tstruct sd *sd = (struct sd *) gspca_dev;\n\tint currentexp = sd->params.exposure.coarseExpLo +\n\t\t\t sd->params.exposure.coarseExpHi * 256;\n\tint ret, startexp;\n\n\tif (on) {\n\t\tint cj = sd->params.flickerControl.coarseJump;\n\t\tsd->params.flickerControl.flickerMode = 1;\n\t\tsd->params.flickerControl.disabled = 0;\n\t\tif (sd->params.exposure.expMode != 2) {\n\t\t\tsd->params.exposure.expMode = 2;\n\t\t\tsd->exposure_status = EXPOSURE_NORMAL;\n\t\t}\n\t\tcurrentexp = currentexp << sd->params.exposure.gain;\n\t\tsd->params.exposure.gain = 0;\n\t\t/* round down current exposure to nearest value */\n\t\tstartexp = (currentexp + ROUND_UP_EXP_FOR_FLICKER) / cj;\n\t\tif (startexp < 1)\n\t\t\tstartexp = 1;\n\t\tstartexp = (startexp * cj) - 1;\n\t\tif (FIRMWARE_VERSION(1, 2))\n\t\t\twhile (startexp > MAX_EXP_102)\n\t\t\t\tstartexp -= cj;\n\t\telse\n\t\t\twhile (startexp > MAX_EXP)\n\t\t\t\tstartexp -= cj;\n\t\tsd->params.exposure.coarseExpLo = startexp & 0xff;\n\t\tsd->params.exposure.coarseExpHi = startexp >> 8;\n\t\tif (currentexp > startexp) {\n\t\t\tif (currentexp > (2 * startexp))\n\t\t\t\tcurrentexp = 2 * startexp;\n\t\t\tsd->params.exposure.redComp =\n\t\t\t\tCOMPGAIN(COMP_RED, 
currentexp, startexp);\n\t\t\tsd->params.exposure.green1Comp =\n\t\t\t\tCOMPGAIN(COMP_GREEN1, currentexp, startexp);\n\t\t\tsd->params.exposure.green2Comp =\n\t\t\t\tCOMPGAIN(COMP_GREEN2, currentexp, startexp);\n\t\t\tsd->params.exposure.blueComp =\n\t\t\t\tCOMPGAIN(COMP_BLUE, currentexp, startexp);\n\t\t} else {\n\t\t\tsd->params.exposure.redComp = COMP_RED;\n\t\t\tsd->params.exposure.green1Comp = COMP_GREEN1;\n\t\t\tsd->params.exposure.green2Comp = COMP_GREEN2;\n\t\t\tsd->params.exposure.blueComp = COMP_BLUE;\n\t\t}\n\t\tif (FIRMWARE_VERSION(1, 2))\n\t\t\tsd->params.exposure.compMode = 0;\n\t\telse\n\t\t\tsd->params.exposure.compMode = 1;\n\n\t\tsd->params.apcor.gain1 = 0x18;\n\t\tsd->params.apcor.gain2 = 0x18;\n\t\tsd->params.apcor.gain4 = 0x16;\n\t\tsd->params.apcor.gain8 = 0x14;\n\t} else {\n\t\tsd->params.flickerControl.flickerMode = 0;\n\t\tsd->params.flickerControl.disabled = 1;\n\t\t/* Average equivalent coarse for each comp channel */\n\t\tstartexp = EXP_FROM_COMP(COMP_RED,\n\t\t\t\tsd->params.exposure.redComp, currentexp);\n\t\tstartexp += EXP_FROM_COMP(COMP_GREEN1,\n\t\t\t\tsd->params.exposure.green1Comp, currentexp);\n\t\tstartexp += EXP_FROM_COMP(COMP_GREEN2,\n\t\t\t\tsd->params.exposure.green2Comp, currentexp);\n\t\tstartexp += EXP_FROM_COMP(COMP_BLUE,\n\t\t\t\tsd->params.exposure.blueComp, currentexp);\n\t\tstartexp = startexp >> 2;\n\t\twhile (startexp > MAX_EXP && sd->params.exposure.gain <\n\t\t sd->params.exposure.gainMode - 1) {\n\t\t\tstartexp = startexp >> 1;\n\t\t\t++sd->params.exposure.gain;\n\t\t}\n\t\tif (FIRMWARE_VERSION(1, 2) && startexp > MAX_EXP_102)\n\t\t\tstartexp = MAX_EXP_102;\n\t\tif (startexp > MAX_EXP)\n\t\t\tstartexp = MAX_EXP;\n\t\tsd->params.exposure.coarseExpLo = startexp & 0xff;\n\t\tsd->params.exposure.coarseExpHi = startexp >> 8;\n\t\tsd->params.exposure.redComp = COMP_RED;\n\t\tsd->params.exposure.green1Comp = COMP_GREEN1;\n\t\tsd->params.exposure.green2Comp = COMP_GREEN2;\n\t\tsd->params.exposure.blueComp = 
COMP_BLUE;\n\t\tsd->params.exposure.compMode = 1;\n\t\tsd->params.apcor.gain1 = 0x18;\n\t\tsd->params.apcor.gain2 = 0x16;\n\t\tsd->params.apcor.gain4 = 0x24;\n\t\tsd->params.apcor.gain8 = 0x34;\n\t}\n\tsd->params.vlOffset.gain1 = 20;\n\tsd->params.vlOffset.gain2 = 24;\n\tsd->params.vlOffset.gain4 = 26;\n\tsd->params.vlOffset.gain8 = 26;\n\n\tif (apply) {\n\t\tret = command_setexposure(gspca_dev);\n\t\tif (ret)\n\t\t\treturn ret;\n\n\t\tret = command_setapcor(gspca_dev);\n\t\tif (ret)\n\t\t\treturn ret;\n\n\t\tret = command_setvloffset(gspca_dev);\n\t\tif (ret)\n\t\t\treturn ret;\n\n\t\tret = command_setflickerctrl(gspca_dev);\n\t\tif (ret)\n\t\t\treturn ret;\n\t}\n\n\treturn 0;\n#undef EXP_FROM_COMP\n#undef COMPGAIN\n}", "static void rtl_hw_start_8106(struct rtl8169_private *tp)\n{\n\trtl_hw_aspm_clkreq_enable(tp, false);\n\n\t/* Force LAN exit from ASPM if Rx/Tx are not idle */": "static void rtl_hw_start_8106(struct rtl8169_private *tp)\n{\n\trtl_hw_aspm_clkreq_enable(tp, false);\n\n\t/* Force LAN exit from ASPM if Rx/Tx are not idle */\n\tRTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);\n\n\tRTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);\n\tRTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);\n\tRTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);\n\n\t/* L0 7us, L1 32us - needed to avoid issues with link-up detection */\n\trtl_set_aspm_entry_latency(tp, 0x2f);\n\n\trtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);\n\n\t/* disable EEE */\n\trtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);\n\n\trtl_pcie_state_l2l3_disable(tp);\n\trtl_hw_aspm_clkreq_enable(tp, true);\n}", "static inline int write_dword (__u32 offset,__u32 x)\n{\n __u32 status;\n\n#ifdef LART_DEBUG": "static inline int write_dword (__u32 offset,__u32 x)\n{\n __u32 status;\n\n#ifdef LART_DEBUG\n printk (KERN_DEBUG \"%s(): 0x%.8x <- 0x%.8x\\n\", __func__, offset, x);\n#endif\n\n /* setup writing */\n write32 (DATA_TO_FLASH (PGM_SETUP),offset);\n\n /* write the 
data */\n write32 (x,offset);\n\n /* wait for the write to finish */\n do\n\t {\n\t\twrite32 (DATA_TO_FLASH (STATUS_READ),offset);\n\t\tstatus = FLASH_TO_DATA (read32 (offset));\n\t }\n while ((~status & STATUS_BUSY) != 0);\n\n /* put the flash back into command mode */\n write32 (DATA_TO_FLASH (READ_ARRAY),offset);\n\n /* was the write successful? */\n if ((status & STATUS_PGM_ERR) || read32 (offset) != x)\n\t {\n\t\tprintk (KERN_WARNING \"%s: write error at address 0x%.8x.\\n\",module_name,offset);\n\t\treturn (0);\n\t }\n\n return (1);\n}", "static void rtl_hw_start_8168g(struct rtl8169_private *tp)\n{\n\trtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);\n\trtl8168g_set_pause_thresholds(tp, 0x38, 0x48);\n": "static void rtl_hw_start_8168g(struct rtl8169_private *tp)\n{\n\trtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);\n\trtl8168g_set_pause_thresholds(tp, 0x38, 0x48);\n\n\trtl_set_def_aspm_entry_latency(tp);\n\n\trtl_reset_packet_filter(tp);\n\trtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);\n\n\tRTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);\n\n\trtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);\n\trtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);\n\n\trtl8168_config_eee_mac(tp);\n\n\trtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);\n\trtl_eri_clear_bits(tp, 0x1b0, BIT(12));\n\n\trtl_pcie_state_l2l3_disable(tp);\n}", "static void measure(int fd)\n{\n time_t start_time;\n int last_state;\n time_t last_time;": "static void measure(int fd)\n{\n time_t start_time;\n int last_state;\n time_t last_time;\n int curr_state;\n time_t curr_time = 0;\n time_t time_diff;\n time_t active_time = 0;\n time_t sleep_time = 0;\n time_t unknown_time = 0;\n time_t total_time = 0;\n int changes = 0;\n float tmp;\n\n printf(\"Starting measurements\\n\");\n\n last_state = check_powermode(fd);\n start_time = last_time = time(0);\n printf(\" System is in state %s\\n\\n\", state_name(last_state));\n\n while(!endit) {\n\tsleep(1);\n\tcurr_state = check_powermode(fd);\n\n\tif (curr_state != 
last_state || endit) {\n\t changes++;\n\t curr_time = time(0);\n\t time_diff = curr_time - last_time;\n\n\t if (last_state == 1) active_time += time_diff;\n\t else if (last_state == 0) sleep_time += time_diff;\n\t else unknown_time += time_diff;\n\n\t last_state = curr_state;\n\t last_time = curr_time;\n\n\t printf(\"%s: State-change to %s\\n\", myctime(curr_time),\n\t\t state_name(curr_state));\n\t}\n }\n changes--; /* Compensate for SIGINT */\n\n total_time = time(0) - start_time;\n printf(\"\\nTotal running time: %lus\\n\", curr_time - start_time);\n printf(\" State changed %d times\\n\", changes);\n\n tmp = (float)sleep_time / (float)total_time * 100;\n printf(\" Time in sleep state: %lus (%.2f%%)\\n\", sleep_time, tmp);\n tmp = (float)active_time / (float)total_time * 100;\n printf(\" Time in active state: %lus (%.2f%%)\\n\", active_time, tmp);\n tmp = (float)unknown_time / (float)total_time * 100;\n printf(\" Time in unknown state: %lus (%.2f%%)\\n\", unknown_time, tmp);\n}", "static void test_reuseport_array(void)\n{\n#define REUSEPORT_FD_IDX(err, last) ({ (err) ? last : !last; })\n\n\tconst __u32 array_size = 4, index0 = 0, index3 = 3;": "static void test_reuseport_array(void)\n{\n#define REUSEPORT_FD_IDX(err, last) ({ (err) ? 
last : !last; })\n\n\tconst __u32 array_size = 4, index0 = 0, index3 = 3;\n\tint types[2] = { SOCK_STREAM, SOCK_DGRAM }, type;\n\t__u64 grpa_cookies[2], sk_cookie, map_cookie;\n\t__s64 grpa_fds64[2] = { -1, -1 }, fd64 = -1;\n\tconst __u32 bad_index = array_size;\n\tint map_fd, err, t, f;\n\t__u32 fds_idx = 0;\n\tint fd;\n\n\tmap_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL,\n\t\t\t\tsizeof(__u32), sizeof(__u64), array_size, NULL);\n\tCHECK(map_fd < 0, \"reuseport array create\",\n\t \"map_fd:%d, errno:%d\\n\", map_fd, errno);\n\n\t/* Test lookup/update/delete with invalid index */\n\terr = bpf_map_delete_elem(map_fd, &bad_index);\n\tCHECK(err >= 0 || errno != E2BIG, \"reuseport array del >=max_entries\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\n\terr = bpf_map_update_elem(map_fd, &bad_index, &fd64, BPF_ANY);\n\tCHECK(err >= 0 || errno != E2BIG,\n\t \"reuseport array update >=max_entries\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\n\terr = bpf_map_lookup_elem(map_fd, &bad_index, &map_cookie);\n\tCHECK(err >= 0 || errno != ENOENT,\n\t \"reuseport array update >=max_entries\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\n\t/* Test lookup/delete non existence elem */\n\terr = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);\n\tCHECK(err >= 0 || errno != ENOENT,\n\t \"reuseport array lookup not-exist elem\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\terr = bpf_map_delete_elem(map_fd, &index3);\n\tCHECK(err >= 0 || errno != ENOENT,\n\t \"reuseport array del not-exist elem\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\n\tfor (t = 0; t < ARRAY_SIZE(types); t++) {\n\t\ttype = types[t];\n\n\t\tprepare_reuseport_grp(type, map_fd, sizeof(__u64), grpa_fds64,\n\t\t\t\t grpa_cookies, ARRAY_SIZE(grpa_fds64));\n\n\t\t/* Test BPF_* update flags */\n\t\t/* BPF_EXIST failure case */\n\t\terr = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],\n\t\t\t\t\t BPF_EXIST);\n\t\tCHECK(err >= 0 || errno != ENOENT,\n\t\t \"reuseport array update empty elem 
BPF_EXIST\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\",\n\t\t type, err, errno);\n\t\tfds_idx = REUSEPORT_FD_IDX(err, fds_idx);\n\n\t\t/* BPF_NOEXIST success case */\n\t\terr = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],\n\t\t\t\t\t BPF_NOEXIST);\n\t\tCHECK(err < 0,\n\t\t \"reuseport array update empty elem BPF_NOEXIST\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\",\n\t\t type, err, errno);\n\t\tfds_idx = REUSEPORT_FD_IDX(err, fds_idx);\n\n\t\t/* BPF_EXIST success case. */\n\t\terr = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],\n\t\t\t\t\t BPF_EXIST);\n\t\tCHECK(err < 0,\n\t\t \"reuseport array update same elem BPF_EXIST\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\", type, err, errno);\n\t\tfds_idx = REUSEPORT_FD_IDX(err, fds_idx);\n\n\t\t/* BPF_NOEXIST failure case */\n\t\terr = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],\n\t\t\t\t\t BPF_NOEXIST);\n\t\tCHECK(err >= 0 || errno != EEXIST,\n\t\t \"reuseport array update non-empty elem BPF_NOEXIST\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\",\n\t\t type, err, errno);\n\t\tfds_idx = REUSEPORT_FD_IDX(err, fds_idx);\n\n\t\t/* BPF_ANY case (always succeed) */\n\t\terr = bpf_map_update_elem(map_fd, &index3, &grpa_fds64[fds_idx],\n\t\t\t\t\t BPF_ANY);\n\t\tCHECK(err < 0,\n\t\t \"reuseport array update same sk with BPF_ANY\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\", type, err, errno);\n\n\t\tfd64 = grpa_fds64[fds_idx];\n\t\tsk_cookie = grpa_cookies[fds_idx];\n\n\t\t/* The same sk cannot be added to reuseport_array twice */\n\t\terr = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_ANY);\n\t\tCHECK(err >= 0 || errno != EBUSY,\n\t\t \"reuseport array update same sk with same index\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\",\n\t\t type, err, errno);\n\n\t\terr = bpf_map_update_elem(map_fd, &index0, &fd64, BPF_ANY);\n\t\tCHECK(err >= 0 || errno != EBUSY,\n\t\t \"reuseport array update same sk with different index\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\",\n\t\t type, err, 
errno);\n\n\t\t/* Test delete elem */\n\t\terr = bpf_map_delete_elem(map_fd, &index3);\n\t\tCHECK(err < 0, \"reuseport array delete sk\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\",\n\t\t type, err, errno);\n\n\t\t/* Add it back with BPF_NOEXIST */\n\t\terr = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);\n\t\tCHECK(err < 0,\n\t\t \"reuseport array re-add with BPF_NOEXIST after del\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\", type, err, errno);\n\n\t\t/* Test cookie */\n\t\terr = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);\n\t\tCHECK(err < 0 || sk_cookie != map_cookie,\n\t\t \"reuseport array lookup re-added sk\",\n\t\t \"sock_type:%d err:%d errno:%d sk_cookie:0x%llx map_cookie:0x%llxn\",\n\t\t type, err, errno, sk_cookie, map_cookie);\n\n\t\t/* Test elem removed by close() */\n\t\tfor (f = 0; f < ARRAY_SIZE(grpa_fds64); f++)\n\t\t\tclose(grpa_fds64[f]);\n\t\terr = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);\n\t\tCHECK(err >= 0 || errno != ENOENT,\n\t\t \"reuseport array lookup after close()\",\n\t\t \"sock_type:%d err:%d errno:%d\\n\",\n\t\t type, err, errno);\n\t}\n\n\t/* Test SOCK_RAW */\n\tfd64 = socket(AF_INET6, SOCK_RAW, IPPROTO_UDP);\n\tCHECK(fd64 == -1, \"socket(SOCK_RAW)\", \"err:%d errno:%d\\n\",\n\t err, errno);\n\terr = bpf_map_update_elem(map_fd, &index3, &fd64, BPF_NOEXIST);\n\tCHECK(err >= 0 || errno != ENOTSUPP, \"reuseport array update SOCK_RAW\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\tclose(fd64);\n\n\t/* Close the 64 bit value map */\n\tclose(map_fd);\n\n\t/* Test 32 bit fd */\n\tmap_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, NULL,\n\t\t\t\tsizeof(__u32), sizeof(__u32), array_size, NULL);\n\tCHECK(map_fd < 0, \"reuseport array create\",\n\t \"map_fd:%d, errno:%d\\n\", map_fd, errno);\n\tprepare_reuseport_grp(SOCK_STREAM, map_fd, sizeof(__u32), &fd64,\n\t\t\t &sk_cookie, 1);\n\tfd = fd64;\n\terr = bpf_map_update_elem(map_fd, &index3, &fd, BPF_NOEXIST);\n\tCHECK(err < 0, \"reuseport array update 32 bit 
fd\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\terr = bpf_map_lookup_elem(map_fd, &index3, &map_cookie);\n\tCHECK(err >= 0 || errno != ENOSPC,\n\t \"reuseport array lookup 32 bit fd\",\n\t \"err:%d errno:%d\\n\", err, errno);\n\tclose(fd);\n\tclose(map_fd);\n}", "static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned int rate)\n{\n\tunsigned long flags;\n\tunsigned int phiIncr, coeffIncr, tmp1, tmp2;\n\tunsigned int correctionPerGOF, correctionPerSec, initialDelay;": "static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned int rate)\n{\n\tunsigned long flags;\n\tunsigned int phiIncr, coeffIncr, tmp1, tmp2;\n\tunsigned int correctionPerGOF, correctionPerSec, initialDelay;\n\tunsigned int frameGroupLength, cnt;\n\n\t/*\n\t * We can only decimate by up to a factor of 1/9th the hardware rate.\n\t * Correct the value if an attempt is made to stray outside that limit.\n\t */\n\tif ((rate * 9) < 48000)\n\t\trate = 48000 / 9;\n\n\t/*\n\t * We can not capture at a rate greater than the Input Rate (48000).\n\t * Return an error if an attempt is made to stray outside that limit.\n\t */\n\tif (rate > 48000)\n\t\trate = 48000;\n\n\t/*\n\t * Compute the values used to drive the actual sample rate conversion.\n\t * The following formulas are being computed, using inline assembly\n\t * since we need to use 64 bit arithmetic to compute the values:\n\t *\n\t * coeffIncr = -floor((Fs,out * 2^23) / Fs,in)\n\t * phiIncr = floor((Fs,in * 2^26) / Fs,out)\n\t * correctionPerGOF = floor((Fs,in * 2^26 - Fs,out * phiIncr) /\n\t * GOF_PER_SEC)\n\t * correctionPerSec = Fs,in * 2^26 - Fs,out * phiIncr -\n\t * GOF_PER_SEC * correctionPerGOF\n\t * initialDelay = ceil((24 * Fs,in) / Fs,out)\n\t *\n\t * i.e.\n\t *\n\t * coeffIncr = neg(dividend((Fs,out * 2^23) / Fs,in))\n\t * phiIncr:ulOther = dividend:remainder((Fs,in * 2^26) / Fs,out)\n\t * correctionPerGOF:correctionPerSec =\n\t * \t dividend:remainder(ulOther / GOF_PER_SEC)\n\t * initialDelay 
= dividend(((24 * Fs,in) + Fs,out - 1) / Fs,out)\n\t */\n\n\ttmp1 = rate << 16;\n\tcoeffIncr = tmp1 / 48000;\n\ttmp1 -= coeffIncr * 48000;\n\ttmp1 <<= 7;\n\tcoeffIncr <<= 7;\n\tcoeffIncr += tmp1 / 48000;\n\tcoeffIncr ^= 0xFFFFFFFF;\n\tcoeffIncr++;\n\ttmp1 = 48000 << 16;\n\tphiIncr = tmp1 / rate;\n\ttmp1 -= phiIncr * rate;\n\ttmp1 <<= 10;\n\tphiIncr <<= 10;\n\ttmp2 = tmp1 / rate;\n\tphiIncr += tmp2;\n\ttmp1 -= tmp2 * rate;\n\tcorrectionPerGOF = tmp1 / GOF_PER_SEC;\n\ttmp1 -= correctionPerGOF * GOF_PER_SEC;\n\tcorrectionPerSec = tmp1;\n\tinitialDelay = DIV_ROUND_UP(48000 * 24, rate);\n\n\t/*\n\t * Fill in the VariDecimate control block.\n\t */\n\tspin_lock_irqsave(&chip->reg_lock, flags);\n\tsnd_cs46xx_poke(chip, BA1_CSRC,\n\t\t((correctionPerSec << 16) & 0xFFFF0000) | (correctionPerGOF & 0xFFFF));\n\tsnd_cs46xx_poke(chip, BA1_CCI, coeffIncr);\n\tsnd_cs46xx_poke(chip, BA1_CD,\n\t\t(((BA1_VARIDEC_BUF_1 + (initialDelay << 2)) << 16) & 0xFFFF0000) | 0x80);\n\tsnd_cs46xx_poke(chip, BA1_CPI, phiIncr);\n\tspin_unlock_irqrestore(&chip->reg_lock, flags);\n\n\t/*\n\t * Figure out the frame group length for the write back task. 
Basically,\n\t * this is just the factors of 24000 (2^6*3*5^3) that are not present in\n\t * the output sample rate.\n\t */\n\tframeGroupLength = 1;\n\tfor (cnt = 2; cnt <= 64; cnt *= 2) {\n\t\tif (((rate / cnt) * cnt) != rate)\n\t\t\tframeGroupLength *= 2;\n\t}\n\tif (((rate / 3) * 3) != rate) {\n\t\tframeGroupLength *= 3;\n\t}\n\tfor (cnt = 5; cnt <= 125; cnt *= 5) {\n\t\tif (((rate / cnt) * cnt) != rate) \n\t\t\tframeGroupLength *= 5;\n }\n\n\t/*\n\t * Fill in the WriteBack control block.\n\t */\n\tspin_lock_irqsave(&chip->reg_lock, flags);\n\tsnd_cs46xx_poke(chip, BA1_CFG1, frameGroupLength);\n\tsnd_cs46xx_poke(chip, BA1_CFG2, (0x00800000 | frameGroupLength));\n\tsnd_cs46xx_poke(chip, BA1_CCST, 0x0000FFFF);\n\tsnd_cs46xx_poke(chip, BA1_CSPB, ((65536 * rate) / 24000));\n\tsnd_cs46xx_poke(chip, (BA1_CSPB + 4), 0x0000FFFF);\n\tspin_unlock_irqrestore(&chip->reg_lock, flags);\n}", "static int cake_config_diffserv8(struct Qdisc *sch)\n{\n/*\tPruned list of traffic classes for typical applications:\n *\n *\t\tNetwork Control (CS6, CS7)": "static int cake_config_diffserv8(struct Qdisc *sch)\n{\n/*\tPruned list of traffic classes for typical applications:\n *\n *\t\tNetwork Control (CS6, CS7)\n *\t\tMinimum Latency (EF, VA, CS5, CS4)\n *\t\tInteractive Shell (CS2)\n *\t\tLow Latency Transactions (AF2x, TOS4)\n *\t\tVideo Streaming (AF4x, AF3x, CS3)\n *\t\tBog Standard (DF etc.)\n *\t\tHigh Throughput (AF1x, TOS2, CS1)\n *\t\tBackground Traffic (LE)\n *\n *\t\tTotal 8 traffic classes.\n */\n\n\tstruct cake_sched_data *q = qdisc_priv(sch);\n\tu32 mtu = psched_mtu(qdisc_dev(sch));\n\tu64 rate = q->rate_bps;\n\tu32 quantum = 256;\n\tu32 i;\n\n\tq->tin_cnt = 8;\n\n\t/* codepoint to class mapping */\n\tq->tin_index = diffserv8;\n\tq->tin_order = normal_order;\n\n\t/* class characteristics */\n\tfor (i = 0; i < q->tin_cnt; i++) {\n\t\tstruct cake_tin_data *b = &q->tins[i];\n\n\t\tcake_set_rate(b, rate, mtu, us_to_ns(q->target),\n\t\t\t 
us_to_ns(q->interval));\n\n\t\tb->tin_quantum = max_t(u16, 1U, quantum);\n\n\t\t/* calculate next class's parameters */\n\t\trate *= 7;\n\t\trate >>= 3;\n\n\t\tquantum *= 7;\n\t\tquantum >>= 3;\n\t}\n\n\treturn 0;\n}", "static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)\n{\n\tint err, i, z, gpr, nctl;\n\tint bit_shifter16;\n\tconst int playback = 10;": "static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)\n{\n\tint err, i, z, gpr, nctl;\n\tint bit_shifter16;\n\tconst int playback = 10;\n\tconst int capture = playback + (SND_EMU10K1_PLAYBACK_CHANNELS * 2); /* we reserve 10 voices */\n\tconst int stereo_mix = capture + 2;\n\tconst int tmp = 0x88;\n\tu32 ptr;\n\tstruct snd_emu10k1_fx8010_code *icode = NULL;\n\tstruct snd_emu10k1_fx8010_control_gpr *controls = NULL, *ctl;\n\tu32 *gpr_map;\n\n\terr = -ENOMEM;\n\ticode = kzalloc(sizeof(*icode), GFP_KERNEL);\n\tif (!icode)\n\t\treturn err;\n\n\ticode->gpr_map = kcalloc(512 + 256 + 256 + 2 * 1024,\n\t\t\t\t sizeof(u_int32_t), GFP_KERNEL);\n\tif (!icode->gpr_map)\n\t\tgoto __err_gpr;\n\tcontrols = kcalloc(SND_EMU10K1_GPR_CONTROLS,\n\t\t\t sizeof(*controls), GFP_KERNEL);\n\tif (!controls)\n\t\tgoto __err_ctrls;\n\n\tgpr_map = icode->gpr_map;\n\n\ticode->tram_data_map = icode->gpr_map + 512;\n\ticode->tram_addr_map = icode->tram_data_map + 256;\n\ticode->code = icode->tram_addr_map + 256;\n\n\t/* clear free GPRs */\n\tfor (i = 0; i < 512; i++)\n\t\tset_bit(i, icode->gpr_valid);\n\t\t\n\t/* clear TRAM data & address lines */\n\tfor (i = 0; i < 256; i++)\n\t\tset_bit(i, icode->tram_valid);\n\n\tstrcpy(icode->name, \"Audigy DSP code for ALSA\");\n\tptr = 0;\n\tnctl = 0;\n\tgpr = stereo_mix + 10;\n\tgpr_map[gpr++] = 0x00007fff;\n\tgpr_map[gpr++] = 0x00008000;\n\tgpr_map[gpr++] = 0x0000ffff;\n\tbit_shifter16 = gpr;\n\n\t/* stop FX processor */\n\tsnd_emu10k1_ptr_write(emu, A_DBG, 0, (emu->fx8010.dbg = 0) | A_DBG_SINGLE_STEP);\n\n#if 1\n\t/* PCM front Playback Volume (independent from stereo mix)\n\t * 
playback = 0 + ( gpr * FXBUS_PCM_LEFT_FRONT >> 31)\n\t * where gpr contains attenuation from corresponding mixer control\n\t * (snd_emu10k1_init_stereo_control)\n\t */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT_FRONT));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+1), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT_FRONT));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"PCM Front Playback Volume\", gpr, 100);\n\tgpr += 2;\n\n\t/* PCM Surround Playback (independent from stereo mix) */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+2), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT_REAR));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+3), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT_REAR));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"PCM Surround Playback Volume\", gpr, 100);\n\tgpr += 2;\n\t\n\t/* PCM Side Playback (independent from stereo mix) */\n\tif (emu->card_capabilities->spk71) {\n\t\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+6), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT_SIDE));\n\t\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+7), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT_SIDE));\n\t\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"PCM Side Playback Volume\", gpr, 100);\n\t\tgpr += 2;\n\t}\n\n\t/* PCM Center Playback (independent from stereo mix) */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+4), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_CENTER));\n\tsnd_emu10k1_init_mono_control(&controls[nctl++], \"PCM Center Playback Volume\", gpr, 100);\n\tgpr++;\n\n\t/* PCM LFE Playback (independent from stereo mix) */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+5), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LFE));\n\tsnd_emu10k1_init_mono_control(&controls[nctl++], \"PCM LFE Playback Volume\", gpr, 100);\n\tgpr++;\n\t\n\t/*\n\t * Stereo Mix\n\t */\n\t/* Wave (PCM) Playback Volume (will be renamed later) */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix), A_C_00000000, A_GPR(gpr), 
A_FXBUS(FXBUS_PCM_LEFT));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix+1), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Wave Playback Volume\", gpr, 100);\n\tgpr += 2;\n\n\t/* Synth Playback */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix+0), A_GPR(stereo_mix+0), A_GPR(gpr), A_FXBUS(FXBUS_MIDI_LEFT));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(stereo_mix+1), A_GPR(stereo_mix+1), A_GPR(gpr+1), A_FXBUS(FXBUS_MIDI_RIGHT));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Synth Playback Volume\", gpr, 100);\n\tgpr += 2;\n\n\t/* Wave (PCM) Capture */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_C_00000000, A_GPR(gpr), A_FXBUS(FXBUS_PCM_LEFT));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_C_00000000, A_GPR(gpr+1), A_FXBUS(FXBUS_PCM_RIGHT));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"PCM Capture Volume\", gpr, 0);\n\tgpr += 2;\n\n\t/* Synth Capture */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_GPR(capture+0), A_GPR(gpr), A_FXBUS(FXBUS_MIDI_LEFT));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_GPR(capture+1), A_GPR(gpr+1), A_FXBUS(FXBUS_MIDI_RIGHT));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Synth Capture Volume\", gpr, 0);\n\tgpr += 2;\n \n\t/*\n\t * inputs\n\t */\n#define A_ADD_VOLUME_IN(var,vol,input) \\\nA_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))\n\n\t/* emu1212 DSP 0 and DSP 1 Capture */\n\tif (emu->card_capabilities->emu_model) {\n\t\tif (emu->card_capabilities->ca0108_chip) {\n\t\t\t/* Note:JCD:No longer bit shift lower 16bits to upper 16bits of 32bit value. 
*/\n\t\t\tA_OP(icode, &ptr, iMACINT0, A_GPR(tmp), A_C_00000000, A3_EMU32IN(0x0), A_C_00000001);\n\t\t\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_GPR(capture+0), A_GPR(gpr), A_GPR(tmp));\n\t\t\tA_OP(icode, &ptr, iMACINT0, A_GPR(tmp), A_C_00000000, A3_EMU32IN(0x1), A_C_00000001);\n\t\t\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_GPR(capture+1), A_GPR(gpr), A_GPR(tmp));\n\t\t} else {\n\t\t\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+0), A_GPR(capture+0), A_GPR(gpr), A_P16VIN(0x0));\n\t\t\tA_OP(icode, &ptr, iMAC0, A_GPR(capture+1), A_GPR(capture+1), A_GPR(gpr+1), A_P16VIN(0x1));\n\t\t}\n\t\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"EMU Capture Volume\", gpr, 0);\n\t\tgpr += 2;\n\t}\n\t/* AC'97 Playback Volume - used only for mic (renamed later) */\n\tA_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_AC97_L);\n\tA_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_AC97_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"AMic Playback Volume\", gpr, 0);\n\tgpr += 2;\n\t/* AC'97 Capture Volume - used only for mic */\n\tA_ADD_VOLUME_IN(capture, gpr, A_EXTIN_AC97_L);\n\tA_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_AC97_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Mic Capture Volume\", gpr, 0);\n\tgpr += 2;\n\n\t/* mic capture buffer */\t\n\tA_OP(icode, &ptr, iINTERP, A_EXTOUT(A_EXTOUT_MIC_CAP), A_EXTIN(A_EXTIN_AC97_L), 0xcd, A_EXTIN(A_EXTIN_AC97_R));\n\n\t/* Audigy CD Playback Volume */\n\tA_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_SPDIF_CD_L);\n\tA_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_SPDIF_CD_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++],\n\t\t\t\t\temu->card_capabilities->ac97_chip ? \"Audigy CD Playback Volume\" : \"CD Playback Volume\",\n\t\t\t\t\tgpr, 0);\n\tgpr += 2;\n\t/* Audigy CD Capture Volume */\n\tA_ADD_VOLUME_IN(capture, gpr, A_EXTIN_SPDIF_CD_L);\n\tA_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_SPDIF_CD_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++],\n\t\t\t\t\temu->card_capabilities->ac97_chip ? 
\"Audigy CD Capture Volume\" : \"CD Capture Volume\",\n\t\t\t\t\tgpr, 0);\n\tgpr += 2;\n\n \t/* Optical SPDIF Playback Volume */\n\tA_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_OPT_SPDIF_L);\n\tA_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_OPT_SPDIF_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958(\"Optical \",PLAYBACK,VOLUME), gpr, 0);\n\tgpr += 2;\n\t/* Optical SPDIF Capture Volume */\n\tA_ADD_VOLUME_IN(capture, gpr, A_EXTIN_OPT_SPDIF_L);\n\tA_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_OPT_SPDIF_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958(\"Optical \",CAPTURE,VOLUME), gpr, 0);\n\tgpr += 2;\n\n\t/* Line2 Playback Volume */\n\tA_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_LINE2_L);\n\tA_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_LINE2_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++],\n\t\t\t\t\temu->card_capabilities->ac97_chip ? \"Line2 Playback Volume\" : \"Line Playback Volume\",\n\t\t\t\t\tgpr, 0);\n\tgpr += 2;\n\t/* Line2 Capture Volume */\n\tA_ADD_VOLUME_IN(capture, gpr, A_EXTIN_LINE2_L);\n\tA_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_LINE2_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++],\n\t\t\t\t\temu->card_capabilities->ac97_chip ? 
\"Line2 Capture Volume\" : \"Line Capture Volume\",\n\t\t\t\t\tgpr, 0);\n\tgpr += 2;\n \n\t/* Philips ADC Playback Volume */\n\tA_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_ADC_L);\n\tA_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_ADC_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Analog Mix Playback Volume\", gpr, 0);\n\tgpr += 2;\n\t/* Philips ADC Capture Volume */\n\tA_ADD_VOLUME_IN(capture, gpr, A_EXTIN_ADC_L);\n\tA_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_ADC_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Analog Mix Capture Volume\", gpr, 0);\n\tgpr += 2;\n\n\t/* Aux2 Playback Volume */\n\tA_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_AUX2_L);\n\tA_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_AUX2_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++],\n\t\t\t\t\temu->card_capabilities->ac97_chip ? \"Aux2 Playback Volume\" : \"Aux Playback Volume\",\n\t\t\t\t\tgpr, 0);\n\tgpr += 2;\n\t/* Aux2 Capture Volume */\n\tA_ADD_VOLUME_IN(capture, gpr, A_EXTIN_AUX2_L);\n\tA_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_AUX2_R);\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++],\n\t\t\t\t\temu->card_capabilities->ac97_chip ? 
\"Aux2 Capture Volume\" : \"Aux Capture Volume\",\n\t\t\t\t\tgpr, 0);\n\tgpr += 2;\n\t\n\t/* Stereo Mix Front Playback Volume */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback), A_GPR(playback), A_GPR(gpr), A_GPR(stereo_mix));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+1), A_GPR(playback+1), A_GPR(gpr+1), A_GPR(stereo_mix+1));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Front Playback Volume\", gpr, 100);\n\tgpr += 2;\n\t\n\t/* Stereo Mix Surround Playback */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+2), A_GPR(playback+2), A_GPR(gpr), A_GPR(stereo_mix));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+3), A_GPR(playback+3), A_GPR(gpr+1), A_GPR(stereo_mix+1));\n\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Surround Playback Volume\", gpr, 0);\n\tgpr += 2;\n\n\t/* Stereo Mix Center Playback */\n\t/* Center = sub = Left/2 + Right/2 */\n\tA_OP(icode, &ptr, iINTERP, A_GPR(tmp), A_GPR(stereo_mix), 0xcd, A_GPR(stereo_mix+1));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+4), A_GPR(playback+4), A_GPR(gpr), A_GPR(tmp));\n\tsnd_emu10k1_init_mono_control(&controls[nctl++], \"Center Playback Volume\", gpr, 0);\n\tgpr++;\n\n\t/* Stereo Mix LFE Playback */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+5), A_GPR(playback+5), A_GPR(gpr), A_GPR(tmp));\n\tsnd_emu10k1_init_mono_control(&controls[nctl++], \"LFE Playback Volume\", gpr, 0);\n\tgpr++;\n\t\n\tif (emu->card_capabilities->spk71) {\n\t\t/* Stereo Mix Side Playback */\n\t\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+6), A_GPR(playback+6), A_GPR(gpr), A_GPR(stereo_mix));\n\t\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+7), A_GPR(playback+7), A_GPR(gpr+1), A_GPR(stereo_mix+1));\n\t\tsnd_emu10k1_init_stereo_control(&controls[nctl++], \"Side Playback Volume\", gpr, 0);\n\t\tgpr += 2;\n\t}\n\n\t/*\n\t * outputs\n\t */\n#define A_PUT_OUTPUT(out,src) A_OP(icode, &ptr, iACC3, A_EXTOUT(out), A_C_00000000, A_C_00000000, A_GPR(src))\n#define A_PUT_STEREO_OUTPUT(out1,out2,src) \\\n\t{A_PUT_OUTPUT(out1,src); 
A_PUT_OUTPUT(out2,src+1);}\n\n#define _A_SWITCH(icode, ptr, dst, src, sw) \\\n\tA_OP((icode), ptr, iMACINT0, dst, A_C_00000000, src, sw);\n#define A_SWITCH(icode, ptr, dst, src, sw) \\\n\t\t_A_SWITCH(icode, ptr, A_GPR(dst), A_GPR(src), A_GPR(sw))\n#define _A_SWITCH_NEG(icode, ptr, dst, src) \\\n\tA_OP((icode), ptr, iANDXOR, dst, src, A_C_00000001, A_C_00000001);\n#define A_SWITCH_NEG(icode, ptr, dst, src) \\\n\t\t_A_SWITCH_NEG(icode, ptr, A_GPR(dst), A_GPR(src))\n\n\n\t/*\n\t * Process tone control\n\t */\n\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 0), A_GPR(playback + 0), A_C_00000000, A_C_00000000); /* left */\n\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 1), A_GPR(playback + 1), A_C_00000000, A_C_00000000); /* right */\n\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 2), A_GPR(playback + 2), A_C_00000000, A_C_00000000); /* rear left */\n\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 3), A_GPR(playback + 3), A_C_00000000, A_C_00000000); /* rear right */\n\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 4), A_GPR(playback + 4), A_C_00000000, A_C_00000000); /* center */\n\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 5), A_GPR(playback + 5), A_C_00000000, A_C_00000000); /* LFE */\n\tif (emu->card_capabilities->spk71) {\n\t\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 6), A_GPR(playback + 6), A_C_00000000, A_C_00000000); /* side left */\n\t\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + 7), A_GPR(playback + 7), A_C_00000000, A_C_00000000); /* side right */\n\t}\n\t\n\n\tctl = &controls[nctl + 0];\n\tctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;\n\tstrcpy(ctl->id.name, \"Tone Control - Bass\");\n\tctl->vcount = 2;\n\tctl->count = 10;\n\tctl->min = 0;\n\tctl->max = 40;\n\tctl->value[0] = ctl->value[1] = 20;\n\tctl->translation 
= EMU10K1_GPR_TRANSLATION_BASS;\n\tctl = &controls[nctl + 1];\n\tctl->id.iface = (__force int)SNDRV_CTL_ELEM_IFACE_MIXER;\n\tstrcpy(ctl->id.name, \"Tone Control - Treble\");\n\tctl->vcount = 2;\n\tctl->count = 10;\n\tctl->min = 0;\n\tctl->max = 40;\n\tctl->value[0] = ctl->value[1] = 20;\n\tctl->translation = EMU10K1_GPR_TRANSLATION_TREBLE;\n\n#define BASS_GPR\t0x8c\n#define TREBLE_GPR\t0x96\n\n\tfor (z = 0; z < 5; z++) {\n\t\tint j;\n\t\tfor (j = 0; j < 2; j++) {\n\t\t\tcontrols[nctl + 0].gpr[z * 2 + j] = BASS_GPR + z * 2 + j;\n\t\t\tcontrols[nctl + 1].gpr[z * 2 + j] = TREBLE_GPR + z * 2 + j;\n\t\t}\n\t}\n\tfor (z = 0; z < 4; z++) {\t\t/* front/rear/center-lfe/side */\n\t\tint j, k, l, d;\n\t\tfor (j = 0; j < 2; j++) {\t/* left/right */\n\t\t\tk = 0xb0 + (z * 8) + (j * 4);\n\t\t\tl = 0xe0 + (z * 8) + (j * 4);\n\t\t\td = playback + SND_EMU10K1_PLAYBACK_CHANNELS + z * 2 + j;\n\n\t\t\tA_OP(icode, &ptr, iMAC0, A_C_00000000, A_C_00000000, A_GPR(d), A_GPR(BASS_GPR + 0 + j));\n\t\t\tA_OP(icode, &ptr, iMACMV, A_GPR(k+1), A_GPR(k), A_GPR(k+1), A_GPR(BASS_GPR + 4 + j));\n\t\t\tA_OP(icode, &ptr, iMACMV, A_GPR(k), A_GPR(d), A_GPR(k), A_GPR(BASS_GPR + 2 + j));\n\t\t\tA_OP(icode, &ptr, iMACMV, A_GPR(k+3), A_GPR(k+2), A_GPR(k+3), A_GPR(BASS_GPR + 8 + j));\n\t\t\tA_OP(icode, &ptr, iMAC0, A_GPR(k+2), A_GPR_ACCU, A_GPR(k+2), A_GPR(BASS_GPR + 6 + j));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(k+2), A_GPR(k+2), A_GPR(k+2), A_C_00000000);\n\n\t\t\tA_OP(icode, &ptr, iMAC0, A_C_00000000, A_C_00000000, A_GPR(k+2), A_GPR(TREBLE_GPR + 0 + j));\n\t\t\tA_OP(icode, &ptr, iMACMV, A_GPR(l+1), A_GPR(l), A_GPR(l+1), A_GPR(TREBLE_GPR + 4 + j));\n\t\t\tA_OP(icode, &ptr, iMACMV, A_GPR(l), A_GPR(k+2), A_GPR(l), A_GPR(TREBLE_GPR + 2 + j));\n\t\t\tA_OP(icode, &ptr, iMACMV, A_GPR(l+3), A_GPR(l+2), A_GPR(l+3), A_GPR(TREBLE_GPR + 8 + j));\n\t\t\tA_OP(icode, &ptr, iMAC0, A_GPR(l+2), A_GPR_ACCU, A_GPR(l+2), A_GPR(TREBLE_GPR + 6 + j));\n\t\t\tA_OP(icode, &ptr, iMACINT0, A_GPR(l+2), A_C_00000000, A_GPR(l+2), 
A_C_00000010);\n\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(d), A_GPR(l+2), A_C_00000000, A_C_00000000);\n\n\t\t\tif (z == 2)\t/* center */\n\t\t\t\tbreak;\n\t\t}\n\t}\n\tnctl += 2;\n\n#undef BASS_GPR\n#undef TREBLE_GPR\n\n\tfor (z = 0; z < 8; z++) {\n\t\tA_SWITCH(icode, &ptr, tmp + 0, playback + SND_EMU10K1_PLAYBACK_CHANNELS + z, gpr + 0);\n\t\tA_SWITCH_NEG(icode, &ptr, tmp + 1, gpr + 0);\n\t\tA_SWITCH(icode, &ptr, tmp + 1, playback + z, tmp + 1);\n\t\tA_OP(icode, &ptr, iACC3, A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000);\n\t}\n\tsnd_emu10k1_init_stereo_onoff_control(controls + nctl++, \"Tone Control - Switch\", gpr, 0);\n\tgpr += 2;\n\n\t/* Master volume (will be renamed later) */\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+0+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+1+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+2+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+3+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+4+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+5+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), A_GPR(playback+6+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tA_OP(icode, &ptr, iMAC0, A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS), A_C_00000000, A_GPR(gpr), 
A_GPR(playback+7+SND_EMU10K1_PLAYBACK_CHANNELS));\n\tsnd_emu10k1_init_mono_control(&controls[nctl++], \"Wave Master Playback Volume\", gpr, 0);\n\tgpr += 2;\n\n\t/* analog speakers */\n\tA_PUT_STEREO_OUTPUT(A_EXTOUT_AFRONT_L, A_EXTOUT_AFRONT_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS);\n\tA_PUT_STEREO_OUTPUT(A_EXTOUT_AREAR_L, A_EXTOUT_AREAR_R, playback+2 + SND_EMU10K1_PLAYBACK_CHANNELS);\n\tA_PUT_OUTPUT(A_EXTOUT_ACENTER, playback+4 + SND_EMU10K1_PLAYBACK_CHANNELS);\n\tA_PUT_OUTPUT(A_EXTOUT_ALFE, playback+5 + SND_EMU10K1_PLAYBACK_CHANNELS);\n\tif (emu->card_capabilities->spk71)\n\t\tA_PUT_STEREO_OUTPUT(A_EXTOUT_ASIDE_L, A_EXTOUT_ASIDE_R, playback+6 + SND_EMU10K1_PLAYBACK_CHANNELS);\n\n\t/* headphone */\n\tA_PUT_STEREO_OUTPUT(A_EXTOUT_HEADPHONE_L, A_EXTOUT_HEADPHONE_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS);\n\n\t/* digital outputs */\n\t/* A_PUT_STEREO_OUTPUT(A_EXTOUT_FRONT_L, A_EXTOUT_FRONT_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS); */\n\tif (emu->card_capabilities->emu_model) {\n\t\t/* EMU1010 Outputs from PCM Front, Rear, Center, LFE, Side */\n\t\tdev_info(emu->card->dev, \"EMU outputs on\\n\");\n\t\tfor (z = 0; z < 8; z++) {\n\t\t\tif (emu->card_capabilities->ca0108_chip) {\n\t\t\t\tA_OP(icode, &ptr, iACC3, A3_EMU32OUT(z), A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), A_C_00000000, A_C_00000000);\n\t\t\t} else {\n\t\t\t\tA_OP(icode, &ptr, iACC3, A_EMU32OUTL(z), A_GPR(playback + SND_EMU10K1_PLAYBACK_CHANNELS + z), A_C_00000000, A_C_00000000);\n\t\t\t}\n\t\t}\n\t}\n\n\t/* IEC958 Optical Raw Playback Switch */ \n\tgpr_map[gpr++] = 0;\n\tgpr_map[gpr++] = 0x1008;\n\tgpr_map[gpr++] = 0xffff0000;\n\tfor (z = 0; z < 2; z++) {\n\t\tA_OP(icode, &ptr, iMAC0, A_GPR(tmp + 2), A_FXBUS(FXBUS_PT_LEFT + z), A_C_00000000, A_C_00000000);\n\t\tA_OP(icode, &ptr, iSKIP, A_GPR_COND, A_GPR_COND, A_GPR(gpr - 2), A_C_00000001);\n\t\tA_OP(icode, &ptr, iACC3, A_GPR(tmp + 2), A_C_00000000, A_C_00010000, A_GPR(tmp + 2));\n\t\tA_OP(icode, &ptr, iANDXOR, A_GPR(tmp + 2), A_GPR(tmp 
+ 2), A_GPR(gpr - 1), A_C_00000000);\n\t\tA_SWITCH(icode, &ptr, tmp + 0, tmp + 2, gpr + z);\n\t\tA_SWITCH_NEG(icode, &ptr, tmp + 1, gpr + z);\n\t\tA_SWITCH(icode, &ptr, tmp + 1, playback + SND_EMU10K1_PLAYBACK_CHANNELS + z, tmp + 1);\n\t\tif ((z==1) && (emu->card_capabilities->spdif_bug)) {\n\t\t\t/* Due to a SPDIF output bug on some Audigy cards, this code delays the Right channel by 1 sample */\n\t\t\tdev_info(emu->card->dev,\n\t\t\t\t \"Installing spdif_bug patch: %s\\n\",\n\t\t\t\t emu->card_capabilities->name);\n\t\t\tA_OP(icode, &ptr, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L + z), A_GPR(gpr - 3), A_C_00000000, A_C_00000000);\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 3), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000);\n\t\t} else {\n\t\t\tA_OP(icode, &ptr, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L + z), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000);\n\t\t}\n\t}\n\tsnd_emu10k1_init_stereo_onoff_control(controls + nctl++, SNDRV_CTL_NAME_IEC958(\"Optical Raw \",PLAYBACK,SWITCH), gpr, 0);\n\tgpr += 2;\n\t\n\tA_PUT_STEREO_OUTPUT(A_EXTOUT_REAR_L, A_EXTOUT_REAR_R, playback+2 + SND_EMU10K1_PLAYBACK_CHANNELS);\n\tA_PUT_OUTPUT(A_EXTOUT_CENTER, playback+4 + SND_EMU10K1_PLAYBACK_CHANNELS);\n\tA_PUT_OUTPUT(A_EXTOUT_LFE, playback+5 + SND_EMU10K1_PLAYBACK_CHANNELS);\n\n\t/* ADC buffer */\n#ifdef EMU10K1_CAPTURE_DIGITAL_OUT\n\tA_PUT_STEREO_OUTPUT(A_EXTOUT_ADC_CAP_L, A_EXTOUT_ADC_CAP_R, playback + SND_EMU10K1_PLAYBACK_CHANNELS);\n#else\n\tA_PUT_OUTPUT(A_EXTOUT_ADC_CAP_L, capture);\n\tA_PUT_OUTPUT(A_EXTOUT_ADC_CAP_R, capture+1);\n#endif\n\n\tif (emu->card_capabilities->emu_model) {\n\t\tif (emu->card_capabilities->ca0108_chip) {\n\t\t\tdev_info(emu->card->dev, \"EMU2 inputs on\\n\");\n\t\t\tfor (z = 0; z < 0x10; z++) {\n\t\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, \n\t\t\t\t\t\t\t\t\tbit_shifter16,\n\t\t\t\t\t\t\t\t\tA3_EMU32IN(z),\n\t\t\t\t\t\t\t\t\tA_FXBUS2(z*2) );\n\t\t\t}\n\t\t} else {\n\t\t\tdev_info(emu->card->dev, \"EMU inputs on\\n\");\n\t\t\t/* Capture 16 
(originally 8) channels of S32_LE sound */\n\n\t\t\t/*\n\t\t\tdev_dbg(emu->card->dev, \"emufx.c: gpr=0x%x, tmp=0x%x\\n\",\n\t\t\t gpr, tmp);\n\t\t\t*/\n\t\t\t/* For the EMU1010: How to get 32bit values from the DSP. High 16bits into L, low 16bits into R. */\n\t\t\t/* A_P16VIN(0) is delayed by one sample,\n\t\t\t * so all other A_P16VIN channels will need to also be delayed\n\t\t\t */\n\t\t\t/* Left ADC in. 1 of 2 */\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_P16VIN(0x0), A_FXBUS2(0) );\n\t\t\t/* Right ADC in 1 of 2 */\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\t/* Delaying by one sample: instead of copying the input\n\t\t\t * value A_P16VIN to output A_FXBUS2 as in the first channel,\n\t\t\t * we use an auxiliary register, delaying the value by one\n\t\t\t * sample\n\t\t\t */\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(2) );\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x1), A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(4) );\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x2), A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(6) );\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x3), A_C_00000000, A_C_00000000);\n\t\t\t/* For 96kHz mode */\n\t\t\t/* Left ADC in. 
2 of 2 */\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0x8) );\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x4), A_C_00000000, A_C_00000000);\n\t\t\t/* Right ADC in 2 of 2 */\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xa) );\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x5), A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xc) );\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x6), A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16( icode, &ptr, tmp, bit_shifter16, A_GPR(gpr - 1), A_FXBUS2(0xe) );\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x7), A_C_00000000, A_C_00000000);\n\t\t\t/* Pavel Hofman - we still have voices, A_FXBUS2s, and\n\t\t\t * A_P16VINs available -\n\t\t\t * let's add 8 more capture channels - total of 16\n\t\t\t */\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t A_FXBUS2(0x10));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x8),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t A_FXBUS2(0x12));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0x9),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t 
A_FXBUS2(0x14));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xa),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t A_FXBUS2(0x16));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xb),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t A_FXBUS2(0x18));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xc),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t A_FXBUS2(0x1a));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xd),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t A_FXBUS2(0x1c));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xe),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t\tgpr_map[gpr++] = 0x00000000;\n\t\t\tsnd_emu10k1_audigy_dsp_convert_32_to_2x16(icode, &ptr, tmp,\n\t\t\t\t\t\t\t\t bit_shifter16,\n\t\t\t\t\t\t\t\t A_GPR(gpr - 1),\n\t\t\t\t\t\t\t\t A_FXBUS2(0x1e));\n\t\t\tA_OP(icode, &ptr, iACC3, A_GPR(gpr - 1), A_P16VIN(0xf),\n\t\t\t A_C_00000000, A_C_00000000);\n\t\t}\n\n#if 0\n\t\tfor (z = 4; z < 8; z++) {\n\t\t\tA_OP(icode, &ptr, iACC3, A_FXBUS2(z), A_C_00000000, A_C_00000000, A_C_00000000);\n\t\t}\n\t\tfor (z = 0xc; z < 0x10; z++) {\n\t\t\tA_OP(icode, &ptr, iACC3, A_FXBUS2(z), A_C_00000000, A_C_00000000, A_C_00000000);\n\t\t}\n#endif\n\t} else {\n\t\t/* EFX capture - capture the 16 EXTINs */\n\t\t/* Capture 
16 channels of S16_LE sound */\n\t\tfor (z = 0; z < 16; z++) {\n\t\t\tA_OP(icode, &ptr, iACC3, A_FXBUS2(z), A_C_00000000, A_C_00000000, A_EXTIN(z));\n\t\t}\n\t}\n\t\n#endif /* JCD test */\n\t/*\n\t * ok, set up done..\n\t */\n\n\tif (gpr > tmp) {\n\t\tsnd_BUG();\n\t\terr = -EIO;\n\t\tgoto __err;\n\t}\n\t/* clear remaining instruction memory */\n\twhile (ptr < 0x400)\n\t\tA_OP(icode, &ptr, 0x0f, 0xc0, 0xc0, 0xcf, 0xc0);\n\n\ticode->gpr_add_control_count = nctl;\n\ticode->gpr_add_controls = controls;\n\temu->support_tlv = 1; /* support TLV */\n\terr = snd_emu10k1_icode_poke(emu, icode, true);\n\temu->support_tlv = 0; /* clear again */\n\n__err:\n\tkfree(controls);\n__err_ctrls:\n\tkfree(icode->gpr_map);\n__err_gpr:\n\tkfree(icode);\n\treturn err;\n}", "static int __init czc_p10t_init(void)\n{\n\t/*\n\t * The device boots up in \"Windows 7\" mode, when the home button sends a\n\t * Windows specific key sequence (Left Meta + D) and the second button": "static int __init czc_p10t_init(void)\n{\n\t/*\n\t * The device boots up in \"Windows 7\" mode, when the home button sends a\n\t * Windows specific key sequence (Left Meta + D) and the second button\n\t * sends an unknown one while also toggling the Radio Kill Switch.\n\t * This is a surprising behavior when the second button is labeled \"Back\".\n\t *\n\t * The vendor-supplied Android-x86 build switches the device to a \"Android\"\n\t * mode by writing value 0x63 to the I/O port 0x68. This just seems to just\n\t * set bit 6 on address 0x96 in the EC region; switching the bit directly\n\t * seems to achieve the same result. It uses a \"p10t_switcher\" to do the\n\t * job. 
It doesn't seem to be able to do anything else, and no other use\n\t * of the port 0x68 is known.\n\t *\n\t * In the Android mode, the home button sends just a single scancode,\n\t * which can be handled in Linux userspace more reasonably and the back\n\t * button only sends a scancode without toggling the kill switch.\n\t * The scancode can then be mapped either to Back or RF Kill functionality\n\t * in userspace, depending on how the button is labeled on that particular\n\t * model.\n\t */\n\toutb(CZC_EC_ANDROID_KEYS, CZC_EC_EXTRA_PORT);\n\treturn 0;\n}", "static int aic_init_cpu(unsigned int cpu)\n{\n\t/* Mask all hard-wired per-CPU IRQ/FIQ sources */\n\n\t/* Pending Fast IPI FIQs */": "static int aic_init_cpu(unsigned int cpu)\n{\n\t/* Mask all hard-wired per-CPU IRQ/FIQ sources */\n\n\t/* Pending Fast IPI FIQs */\n\twrite_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);\n\n\t/* Timer FIQs */\n\tsysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);\n\tsysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);\n\n\t/* EL2-only (VHE mode) IRQ sources */\n\tif (is_kernel_in_hyp_mode()) {\n\t\t/* Guest timers */\n\t\tsysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,\n\t\t\t\t VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);\n\n\t\t/* vGIC maintenance IRQ */\n\t\tsysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);\n\t}\n\n\t/* PMC FIQ */\n\tsysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,\n\t\t\t FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));\n\n\t/* Uncore PMC FIQ */\n\tsysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,\n\t\t\t FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));\n\n\t/* Commit all of the above */\n\tisb();\n\n\tif (aic_irqc->info.version == 1) {\n\t\t/*\n\t\t * Make sure the kernel's idea of logical CPU order is the same as AIC's\n\t\t * If we ever end up with a mismatch here, we will have to introduce\n\t\t * a mapping table similar to what other irqchip drivers do.\n\t\t */\n\t\tWARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != 
smp_processor_id());\n\n\t\t/*\n\t\t * Always keep IPIs unmasked at the hardware level (except auto-masking\n\t\t * by AIC during processing). We manage masks at the vIPI level.\n\t\t * These registers only exist on AICv1, AICv2 always uses fast IPIs.\n\t\t */\n\t\taic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);\n\t\tif (static_branch_likely(&use_fast_ipi)) {\n\t\t\taic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);\n\t\t} else {\n\t\t\taic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);\n\t\t\taic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);\n\t\t}\n\t}\n\n\t/* Initialize the local mask state */\n\t__this_cpu_write(aic_fiq_unmasked, 0);\n\n\treturn 0;\n}", "static inline int erase_block (__u32 offset)\n{\n __u32 status;\n\n#ifdef LART_DEBUG": "static inline int erase_block (__u32 offset)\n{\n __u32 status;\n\n#ifdef LART_DEBUG\n printk (KERN_DEBUG \"%s(): 0x%.8x\\n\", __func__, offset);\n#endif\n\n /* erase and confirm */\n write32 (DATA_TO_FLASH (ERASE_SETUP),offset);\n write32 (DATA_TO_FLASH (ERASE_CONFIRM),offset);\n\n /* wait for block erase to finish */\n do\n\t {\n\t\twrite32 (DATA_TO_FLASH (STATUS_READ),offset);\n\t\tstatus = FLASH_TO_DATA (read32 (offset));\n\t }\n while ((~status & STATUS_BUSY) != 0);\n\n /* put the flash back into command mode */\n write32 (DATA_TO_FLASH (READ_ARRAY),offset);\n\n /* was the erase successful? 
*/\n if ((status & STATUS_ERASE_ERR))\n\t {\n\t\tprintk (KERN_WARNING \"%s: erase error at address 0x%.8x.\\n\",module_name,offset);\n\t\treturn (0);\n\t }\n\n return (1);\n}", "static void ae5_post_dsp_stream_setup(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\n\tmutex_lock(&spec->chipio_mutex);": "static void ae5_post_dsp_stream_setup(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\n\tmutex_lock(&spec->chipio_mutex);\n\n\tsnd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x725, 0x81);\n\n\tchipio_set_conn_rate_no_mutex(codec, 0x70, SR_96_000);\n\n\tchipio_set_stream_source_dest(codec, 0x5, 0x43, 0x0);\n\n\tchipio_set_stream_source_dest(codec, 0x18, 0x9, 0xd0);\n\tchipio_set_conn_rate_no_mutex(codec, 0xd0, SR_96_000);\n\tchipio_set_stream_channels(codec, 0x18, 6);\n\tchipio_set_stream_control(codec, 0x18, 1);\n\n\tchipio_set_control_param_no_mutex(codec, CONTROL_PARAM_ASI, 4);\n\n\tchipio_8051_write_pll_pmu_no_mutex(codec, 0x43, 0xc7);\n\n\tca0113_mmio_command_set(codec, 0x48, 0x01, 0x80);\n\n\tmutex_unlock(&spec->chipio_mutex);\n}", "static int ni_expand_mft_list(struct ntfs_inode *ni)\n{\n\tint err = 0;\n\tstruct runs_tree *run = &ni->file.run;\n\tu32 asize, run_size, done = 0;": "static int ni_expand_mft_list(struct ntfs_inode *ni)\n{\n\tint err = 0;\n\tstruct runs_tree *run = &ni->file.run;\n\tu32 asize, run_size, done = 0;\n\tstruct ATTRIB *attr;\n\tstruct rb_node *node;\n\tCLST mft_min, mft_new, svcn, evcn, plen;\n\tstruct mft_inode *mi, *mi_min, *mi_new;\n\tstruct ntfs_sb_info *sbi = ni->mi.sbi;\n\n\t/* Find the nearest MFT. 
*/\n\tmft_min = 0;\n\tmft_new = 0;\n\tmi_min = NULL;\n\n\tfor (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {\n\t\tmi = rb_entry(node, struct mft_inode, node);\n\n\t\tattr = mi_enum_attr(mi, NULL);\n\n\t\tif (!attr) {\n\t\t\tmft_min = mi->rno;\n\t\t\tmi_min = mi;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tif (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) {\n\t\tmft_new = 0;\n\t\t/* Really this is not critical. */\n\t} else if (mft_min > mft_new) {\n\t\tmft_min = mft_new;\n\t\tmi_min = mi_new;\n\t} else {\n\t\tntfs_mark_rec_free(sbi, mft_new);\n\t\tmft_new = 0;\n\t\tni_remove_mi(ni, mi_new);\n\t}\n\n\tattr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);\n\tif (!attr) {\n\t\terr = -EINVAL;\n\t\tgoto out;\n\t}\n\n\tasize = le32_to_cpu(attr->size);\n\n\tevcn = le64_to_cpu(attr->nres.evcn);\n\tsvcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits);\n\tif (evcn + 1 >= svcn) {\n\t\terr = -EINVAL;\n\t\tgoto out;\n\t}\n\n\t/*\n\t * Split primary attribute [0 evcn] in two parts [0 svcn) + [svcn evcn].\n\t *\n\t * Update first part of ATTR_DATA in 'primary MFT.\n\t */\n\terr = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),\n\t\t asize - SIZEOF_NONRESIDENT, &plen);\n\tif (err < 0)\n\t\tgoto out;\n\n\trun_size = ALIGN(err, 8);\n\terr = 0;\n\n\tif (plen < svcn) {\n\t\terr = -EINVAL;\n\t\tgoto out;\n\t}\n\n\tattr->nres.evcn = cpu_to_le64(svcn - 1);\n\tattr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT);\n\t/* 'done' - How many bytes of primary MFT becomes free. */\n\tdone = asize - run_size - SIZEOF_NONRESIDENT;\n\tle32_sub_cpu(&ni->mi.mrec->used, done);\n\n\t/* Estimate the size of second part: run_buf=NULL. 
*/\n\terr = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,\n\t\t &plen);\n\tif (err < 0)\n\t\tgoto out;\n\n\trun_size = ALIGN(err, 8);\n\terr = 0;\n\n\tif (plen < evcn + 1 - svcn) {\n\t\terr = -EINVAL;\n\t\tgoto out;\n\t}\n\n\t/*\n\t * This function may implicitly call expand attr_list.\n\t * Insert second part of ATTR_DATA in 'mi_min'.\n\t */\n\tattr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0,\n\t\t\t SIZEOF_NONRESIDENT + run_size,\n\t\t\t SIZEOF_NONRESIDENT, svcn, NULL);\n\tif (!attr) {\n\t\terr = -EINVAL;\n\t\tgoto out;\n\t}\n\n\tattr->non_res = 1;\n\tattr->name_off = SIZEOF_NONRESIDENT_LE;\n\tattr->flags = 0;\n\n\trun_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),\n\t\t run_size, &plen);\n\n\tattr->nres.svcn = cpu_to_le64(svcn);\n\tattr->nres.evcn = cpu_to_le64(evcn);\n\tattr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT);\n\nout:\n\tif (mft_new) {\n\t\tntfs_mark_rec_free(sbi, mft_new);\n\t\tni_remove_mi(ni, mi_new);\n\t}\n\n\treturn !err && !done ? 
-EOPNOTSUPP : err;\n}", "static int ca0132_prepare_verbs(struct hda_codec *codec)\n{\n/* Verbs + terminator (an empty element) */\n#define NUM_SPEC_VERBS 2\n\tstruct ca0132_spec *spec = codec->spec;": "static int ca0132_prepare_verbs(struct hda_codec *codec)\n{\n/* Verbs + terminator (an empty element) */\n#define NUM_SPEC_VERBS 2\n\tstruct ca0132_spec *spec = codec->spec;\n\n\tspec->chip_init_verbs = ca0132_init_verbs0;\n\t/*\n\t * Since desktop cards use pci_mmio, this can be used to determine\n\t * whether or not to use these verbs instead of a separate bool.\n\t */\n\tif (ca0132_use_pci_mmio(spec))\n\t\tspec->desktop_init_verbs = ca0132_init_verbs1;\n\tspec->spec_init_verbs = kcalloc(NUM_SPEC_VERBS,\n\t\t\t\t\tsizeof(struct hda_verb),\n\t\t\t\t\tGFP_KERNEL);\n\tif (!spec->spec_init_verbs)\n\t\treturn -ENOMEM;\n\n\t/* config EAPD */\n\tspec->spec_init_verbs[0].nid = 0x0b;\n\tspec->spec_init_verbs[0].param = 0x78D;\n\tspec->spec_init_verbs[0].verb = 0x00;\n\n\t/* Previously commented configuration */\n\t/*\n\tspec->spec_init_verbs[2].nid = 0x0b;\n\tspec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE;\n\tspec->spec_init_verbs[2].verb = 0x02;\n\n\tspec->spec_init_verbs[3].nid = 0x10;\n\tspec->spec_init_verbs[3].param = 0x78D;\n\tspec->spec_init_verbs[3].verb = 0x02;\n\n\tspec->spec_init_verbs[4].nid = 0x10;\n\tspec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE;\n\tspec->spec_init_verbs[4].verb = 0x02;\n\t*/\n\n\t/* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */\n\treturn 0;\n}", "static int dib7090p_get_best_sampling(struct dvb_frontend *fe , struct dibx090p_best_adc *adc)\n{\n\tu8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1;\n\n\tu16 xtal = 12000;": "static int dib7090p_get_best_sampling(struct dvb_frontend *fe , struct dibx090p_best_adc *adc)\n{\n\tu8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1;\n\n\tu16 xtal = 12000;\n\tu32 fcp_min = 1900; /* PLL Minimum Frequency comparator KHz */\n\tu32 
fcp_max = 20000; /* PLL Maximum Frequency comparator KHz */\n\tu32 fdem_max = 76000;\n\tu32 fdem_min = 69500;\n\tu32 fcp = 0, fs = 0, fdem = 0;\n\tu32 harmonic_id = 0;\n\n\tadc->pll_loopdiv = loopdiv;\n\tadc->pll_prediv = prediv;\n\tadc->timf = 0;\n\n\tdeb_info(\"bandwidth = %d fdem_min =%d\", fe->dtv_property_cache.bandwidth_hz, fdem_min);\n\n\t/* Find Min and Max prediv */\n\twhile ((xtal/max_prediv) >= fcp_min)\n\t\tmax_prediv++;\n\n\tmax_prediv--;\n\tmin_prediv = max_prediv;\n\twhile ((xtal/min_prediv) <= fcp_max) {\n\t\tmin_prediv--;\n\t\tif (min_prediv == 1)\n\t\t\tbreak;\n\t}\n\tdeb_info(\"MIN prediv = %d : MAX prediv = %d\", min_prediv, max_prediv);\n\n\tmin_prediv = 2;\n\n\tfor (prediv = min_prediv ; prediv < max_prediv; prediv++) {\n\t\tfcp = xtal / prediv;\n\t\tif (fcp > fcp_min && fcp < fcp_max) {\n\t\t\tfor (loopdiv = 1 ; loopdiv < 64 ; loopdiv++) {\n\t\t\t\tfdem = ((xtal/prediv) * loopdiv);\n\t\t\t\tfs = fdem / 4;\n\t\t\t\t/* test min/max system restrictions */\n\n\t\t\t\tif ((fdem >= fdem_min) && (fdem <= fdem_max) && (fs >= fe->dtv_property_cache.bandwidth_hz/1000)) {\n\t\t\t\t\tspur = 0;\n\t\t\t\t\t/* test fs harmonics positions */\n\t\t\t\t\tfor (harmonic_id = (fe->dtv_property_cache.frequency / (1000*fs)) ; harmonic_id <= ((fe->dtv_property_cache.frequency / (1000*fs))+1) ; harmonic_id++) {\n\t\t\t\t\t\tif (((fs*harmonic_id) >= ((fe->dtv_property_cache.frequency/1000) - (fe->dtv_property_cache.bandwidth_hz/2000))) && ((fs*harmonic_id) <= ((fe->dtv_property_cache.frequency/1000) + (fe->dtv_property_cache.bandwidth_hz/2000)))) {\n\t\t\t\t\t\t\tspur = 1;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (!spur) {\n\t\t\t\t\t\tadc->pll_loopdiv = loopdiv;\n\t\t\t\t\t\tadc->pll_prediv = prediv;\n\t\t\t\t\t\tadc->timf = 2396745143UL/fdem*(1 << 9);\n\t\t\t\t\t\tadc->timf += ((2396745143UL%fdem) << 9)/fdem;\n\t\t\t\t\t\tdeb_info(\"loopdiv=%i prediv=%i timf=%i\", loopdiv, prediv, 
adc->timf);\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif (!spur)\n\t\t\tbreak;\n\t}\n\n\n\tif (adc->pll_loopdiv == 0 && adc->pll_prediv == 0)\n\t\treturn -EINVAL;\n\telse\n\t\treturn 0;\n}", "static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, int id)\n{\n\tstruct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg;\n\tu32 values[HDA_MAX_COMPONENTS];\n\tstruct acpi_device *adev;": "static int cs35l41_hda_read_acpi(struct cs35l41_hda *cs35l41, const char *hid, int id)\n{\n\tstruct cs35l41_hw_cfg *hw_cfg = &cs35l41->hw_cfg;\n\tu32 values[HDA_MAX_COMPONENTS];\n\tstruct acpi_device *adev;\n\tstruct device *physdev;\n\tchar *property;\n\tsize_t nval;\n\tint i, ret;\n\n\tadev = acpi_dev_get_first_match_dev(hid, NULL, -1);\n\tif (!adev) {\n\t\tdev_err(cs35l41->dev, \"Failed to find an ACPI device for %s\\n\", hid);\n\t\treturn -ENODEV;\n\t}\n\n\tphysdev = get_device(acpi_get_first_physical_node(adev));\n\tacpi_dev_put(adev);\n\n\tproperty = \"cirrus,dev-index\";\n\tret = device_property_count_u32(physdev, property);\n\tif (ret <= 0)\n\t\tgoto no_acpi_dsd;\n\n\tif (ret > ARRAY_SIZE(values)) {\n\t\tret = -EINVAL;\n\t\tgoto err;\n\t}\n\tnval = ret;\n\n\tret = device_property_read_u32_array(physdev, property, values, nval);\n\tif (ret)\n\t\tgoto err;\n\n\tcs35l41->index = -1;\n\tfor (i = 0; i < nval; i++) {\n\t\tif (values[i] == id) {\n\t\t\tcs35l41->index = i;\n\t\t\tbreak;\n\t\t}\n\t}\n\tif (cs35l41->index == -1) {\n\t\tdev_err(cs35l41->dev, \"No index found in %s\\n\", property);\n\t\tret = -ENODEV;\n\t\tgoto err;\n\t}\n\n\t/* To use the same release code for all laptop variants we can't use devm_ version of\n\t * gpiod_get here, as CLSA010* don't have a fully functional bios with an _DSD node\n\t */\n\tcs35l41->reset_gpio = fwnode_gpiod_get_index(&adev->fwnode, \"reset\", cs35l41->index,\n\t\t\t\t\t\t GPIOD_OUT_LOW, \"cs35l41-reset\");\n\n\tproperty = \"cirrus,speaker-position\";\n\tret = device_property_read_u32_array(physdev, 
property, values, nval);\n\tif (ret)\n\t\tgoto err;\n\thw_cfg->spk_pos = values[cs35l41->index];\n\n\tcs35l41->channel_index = 0;\n\tfor (i = 0; i < cs35l41->index; i++)\n\t\tif (values[i] == hw_cfg->spk_pos)\n\t\t\tcs35l41->channel_index++;\n\n\tproperty = \"cirrus,gpio1-func\";\n\tret = device_property_read_u32_array(physdev, property, values, nval);\n\tif (ret)\n\t\tgoto err;\n\thw_cfg->gpio1.func = values[cs35l41->index];\n\thw_cfg->gpio1.valid = true;\n\n\tproperty = \"cirrus,gpio2-func\";\n\tret = device_property_read_u32_array(physdev, property, values, nval);\n\tif (ret)\n\t\tgoto err;\n\thw_cfg->gpio2.func = values[cs35l41->index];\n\thw_cfg->gpio2.valid = true;\n\n\tproperty = \"cirrus,boost-peak-milliamp\";\n\tret = device_property_read_u32_array(physdev, property, values, nval);\n\tif (ret == 0)\n\t\thw_cfg->bst_ipk = values[cs35l41->index];\n\telse\n\t\thw_cfg->bst_ipk = -1;\n\n\tproperty = \"cirrus,boost-ind-nanohenry\";\n\tret = device_property_read_u32_array(physdev, property, values, nval);\n\tif (ret == 0)\n\t\thw_cfg->bst_ind = values[cs35l41->index];\n\telse\n\t\thw_cfg->bst_ind = -1;\n\n\tproperty = \"cirrus,boost-cap-microfarad\";\n\tret = device_property_read_u32_array(physdev, property, values, nval);\n\tif (ret == 0)\n\t\thw_cfg->bst_cap = values[cs35l41->index];\n\telse\n\t\thw_cfg->bst_cap = -1;\n\n\tif (hw_cfg->bst_ind > 0 || hw_cfg->bst_cap > 0 || hw_cfg->bst_ipk > 0)\n\t\thw_cfg->bst_type = CS35L41_INT_BOOST;\n\telse\n\t\thw_cfg->bst_type = CS35L41_EXT_BOOST;\n\n\thw_cfg->valid = true;\n\tput_device(physdev);\n\n\treturn 0;\n\nerr:\n\tput_device(physdev);\n\tdev_err(cs35l41->dev, \"Failed property %s: %d\\n\", property, ret);\n\n\treturn ret;\n\nno_acpi_dsd:\n\t/*\n\t * Device CLSA0100 doesn't have _DSD so a gpiod_get by the label reset won't work.\n\t * And devices created by i2c-multi-instantiate don't have their device struct pointing to\n\t * the correct fwnode, so acpi_dev must be used here.\n\t * And devm functions expect that 
the device requesting the resource has the correct\n\t * fwnode.\n\t */\n\tif (strncmp(hid, \"CLSA0100\", 8) != 0)\n\t\treturn -EINVAL;\n\n\t/* check I2C address to assign the index */\n\tcs35l41->index = id == 0x40 ? 0 : 1;\n\tcs35l41->hw_cfg.spk_pos = cs35l41->index;\n\tcs35l41->channel_index = 0;\n\tcs35l41->reset_gpio = gpiod_get_index(physdev, NULL, 0, GPIOD_OUT_HIGH);\n\tcs35l41->hw_cfg.bst_type = CS35L41_EXT_BOOST_NO_VSPK_SWITCH;\n\thw_cfg->gpio2.func = CS35L41_GPIO2_INT_OPEN_DRAIN;\n\thw_cfg->gpio2.valid = true;\n\tcs35l41->hw_cfg.valid = true;\n\tput_device(physdev);\n\n\treturn 0;\n}", "static int smcr_serv_conf_first_link(struct smc_sock *smc)\n{\n\tstruct smc_link *link = smc->conn.lnk;\n\tstruct smc_llc_qentry *qentry;\n\tint rc;": "static int smcr_serv_conf_first_link(struct smc_sock *smc)\n{\n\tstruct smc_link *link = smc->conn.lnk;\n\tstruct smc_llc_qentry *qentry;\n\tint rc;\n\n\tif (smcr_link_reg_rmb(link, smc->conn.rmb_desc))\n\t\treturn SMC_CLC_DECL_ERR_REGRMB;\n\n\t/* send CONFIRM LINK request to client over the RoCE fabric */\n\trc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);\n\tif (rc < 0)\n\t\treturn SMC_CLC_DECL_TIMEOUT_CL;\n\n\t/* receive CONFIRM LINK response from client over the RoCE fabric */\n\tqentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,\n\t\t\t SMC_LLC_CONFIRM_LINK);\n\tif (!qentry) {\n\t\tstruct smc_clc_msg_decline dclc;\n\n\t\trc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),\n\t\t\t\t SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);\n\t\treturn rc == -EAGAIN ? 
SMC_CLC_DECL_TIMEOUT_CL : rc;\n\t}\n\tsmc_llc_save_peer_uid(qentry);\n\trc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);\n\tsmc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);\n\tif (rc)\n\t\treturn SMC_CLC_DECL_RMBE_EC;\n\n\t/* confirm_rkey is implicit on 1st contact */\n\tsmc->conn.rmb_desc->is_conf_rkey = true;\n\n\tsmc_llc_link_active(link);\n\tsmcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);\n\n\t/* initial contact - try to establish second link */\n\tsmc_llc_srv_add_link(link, NULL);\n\treturn 0;\n}", "static int jt8ew9_post_poweron(struct ccs_sensor *sensor)\n{\n\tstatic const struct ccs_reg_8 regs[] = {\n\t\t{ 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */\n\t\t{ 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */": "static int jt8ew9_post_poweron(struct ccs_sensor *sensor)\n{\n\tstatic const struct ccs_reg_8 regs[] = {\n\t\t{ 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */\n\t\t{ 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */\n\t\t{ 0x30af, 0xd0 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */\n\t\t{ 0x322d, 0x04 }, /* Adjusting Processing Image Size to Scaler Toshiba Recommendation Setting */\n\t\t{ 0x3255, 0x0f }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */\n\t\t{ 0x3256, 0x15 }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */\n\t\t{ 0x3258, 0x70 }, /* Analog Gain Control Toshiba Recommendation Setting */\n\t\t{ 0x3259, 0x70 }, /* Analog Gain Control Toshiba Recommendation Setting */\n\t\t{ 0x325f, 0x7c }, /* Analog Gain Control Toshiba Recommendation Setting */\n\t\t{ 0x3302, 0x06 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */\n\t\t{ 0x3304, 0x00 }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */\n\t\t{ 0x3307, 0x22 }, /* Pixel Reference Voltage Control 
Toshiba Recommendation Setting */\n\t\t{ 0x3308, 0x8d }, /* Pixel Reference Voltage Control Toshiba Recommendation Setting */\n\t\t{ 0x331e, 0x0f }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */\n\t\t{ 0x3320, 0x30 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */\n\t\t{ 0x3321, 0x11 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */\n\t\t{ 0x3322, 0x98 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */\n\t\t{ 0x3323, 0x64 }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */\n\t\t{ 0x3325, 0x83 }, /* Read Out Timing Control Toshiba Recommendation Setting */\n\t\t{ 0x3330, 0x18 }, /* Read Out Timing Control Toshiba Recommendation Setting */\n\t\t{ 0x333c, 0x01 }, /* Read Out Timing Control Toshiba Recommendation Setting */\n\t\t{ 0x3345, 0x2f }, /* Black Hole Sun Correction Control Toshiba Recommendation Setting */\n\t\t{ 0x33de, 0x38 }, /* Horizontal Noise Reduction Control Toshiba Recommendation Setting */\n\t\t/* Taken from v03. No idea what the rest are. 
*/\n\t\t{ 0x32e0, 0x05 },\n\t\t{ 0x32e1, 0x05 },\n\t\t{ 0x32e2, 0x04 },\n\t\t{ 0x32e5, 0x04 },\n\t\t{ 0x32e6, 0x04 },\n\n\t};\n\n\treturn ccs_write_addr_8s(sensor, regs, ARRAY_SIZE(regs));\n}", "static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)\n{\n\tstruct p_protocol *p = pi->data;\n\tenum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;\n\tint p_proto, p_discard_my_data, p_two_primaries, cf;": "static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)\n{\n\tstruct p_protocol *p = pi->data;\n\tenum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;\n\tint p_proto, p_discard_my_data, p_two_primaries, cf;\n\tstruct net_conf *nc, *old_net_conf, *new_net_conf = NULL;\n\tchar integrity_alg[SHARED_SECRET_MAX] = \"\";\n\tstruct crypto_shash *peer_integrity_tfm = NULL;\n\tvoid *int_dig_in = NULL, *int_dig_vv = NULL;\n\n\tp_proto\t\t= be32_to_cpu(p->protocol);\n\tp_after_sb_0p\t= be32_to_cpu(p->after_sb_0p);\n\tp_after_sb_1p\t= be32_to_cpu(p->after_sb_1p);\n\tp_after_sb_2p\t= be32_to_cpu(p->after_sb_2p);\n\tp_two_primaries = be32_to_cpu(p->two_primaries);\n\tcf\t\t= be32_to_cpu(p->conn_flags);\n\tp_discard_my_data = cf & CF_DISCARD_MY_DATA;\n\n\tif (connection->agreed_pro_version >= 87) {\n\t\tint err;\n\n\t\tif (pi->size > sizeof(integrity_alg))\n\t\t\treturn -EIO;\n\t\terr = drbd_recv_all(connection, integrity_alg, pi->size);\n\t\tif (err)\n\t\t\treturn err;\n\t\tintegrity_alg[SHARED_SECRET_MAX - 1] = 0;\n\t}\n\n\tif (pi->cmd != P_PROTOCOL_UPDATE) {\n\t\tclear_bit(CONN_DRY_RUN, &connection->flags);\n\n\t\tif (cf & CF_DRY_RUN)\n\t\t\tset_bit(CONN_DRY_RUN, &connection->flags);\n\n\t\trcu_read_lock();\n\t\tnc = rcu_dereference(connection->net_conf);\n\n\t\tif (p_proto != nc->wire_protocol) {\n\t\t\tdrbd_err(connection, \"incompatible %s settings\\n\", \"protocol\");\n\t\t\tgoto disconnect_rcu_unlock;\n\t\t}\n\n\t\tif (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) 
{\n\t\t\tdrbd_err(connection, \"incompatible %s settings\\n\", \"after-sb-0pri\");\n\t\t\tgoto disconnect_rcu_unlock;\n\t\t}\n\n\t\tif (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {\n\t\t\tdrbd_err(connection, \"incompatible %s settings\\n\", \"after-sb-1pri\");\n\t\t\tgoto disconnect_rcu_unlock;\n\t\t}\n\n\t\tif (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {\n\t\t\tdrbd_err(connection, \"incompatible %s settings\\n\", \"after-sb-2pri\");\n\t\t\tgoto disconnect_rcu_unlock;\n\t\t}\n\n\t\tif (p_discard_my_data && nc->discard_my_data) {\n\t\t\tdrbd_err(connection, \"incompatible %s settings\\n\", \"discard-my-data\");\n\t\t\tgoto disconnect_rcu_unlock;\n\t\t}\n\n\t\tif (p_two_primaries != nc->two_primaries) {\n\t\t\tdrbd_err(connection, \"incompatible %s settings\\n\", \"allow-two-primaries\");\n\t\t\tgoto disconnect_rcu_unlock;\n\t\t}\n\n\t\tif (strcmp(integrity_alg, nc->integrity_alg)) {\n\t\t\tdrbd_err(connection, \"incompatible %s settings\\n\", \"data-integrity-alg\");\n\t\t\tgoto disconnect_rcu_unlock;\n\t\t}\n\n\t\trcu_read_unlock();\n\t}\n\n\tif (integrity_alg[0]) {\n\t\tint hash_size;\n\n\t\t/*\n\t\t * We can only change the peer data integrity algorithm\n\t\t * here. 
Changing our own data integrity algorithm\n\t\t * requires that we send a P_PROTOCOL_UPDATE packet at\n\t\t * the same time; otherwise, the peer has no way to\n\t\t * tell between which packets the algorithm should\n\t\t * change.\n\t\t */\n\n\t\tpeer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0);\n\t\tif (IS_ERR(peer_integrity_tfm)) {\n\t\t\tpeer_integrity_tfm = NULL;\n\t\t\tdrbd_err(connection, \"peer data-integrity-alg %s not supported\\n\",\n\t\t\t\t integrity_alg);\n\t\t\tgoto disconnect;\n\t\t}\n\n\t\thash_size = crypto_shash_digestsize(peer_integrity_tfm);\n\t\tint_dig_in = kmalloc(hash_size, GFP_KERNEL);\n\t\tint_dig_vv = kmalloc(hash_size, GFP_KERNEL);\n\t\tif (!(int_dig_in && int_dig_vv)) {\n\t\t\tdrbd_err(connection, \"Allocation of buffers for data integrity checking failed\\n\");\n\t\t\tgoto disconnect;\n\t\t}\n\t}\n\n\tnew_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);\n\tif (!new_net_conf)\n\t\tgoto disconnect;\n\n\tmutex_lock(&connection->data.mutex);\n\tmutex_lock(&connection->resource->conf_update);\n\told_net_conf = connection->net_conf;\n\t*new_net_conf = *old_net_conf;\n\n\tnew_net_conf->wire_protocol = p_proto;\n\tnew_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);\n\tnew_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);\n\tnew_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);\n\tnew_net_conf->two_primaries = p_two_primaries;\n\n\trcu_assign_pointer(connection->net_conf, new_net_conf);\n\tmutex_unlock(&connection->resource->conf_update);\n\tmutex_unlock(&connection->data.mutex);\n\n\tcrypto_free_shash(connection->peer_integrity_tfm);\n\tkfree(connection->int_dig_in);\n\tkfree(connection->int_dig_vv);\n\tconnection->peer_integrity_tfm = peer_integrity_tfm;\n\tconnection->int_dig_in = int_dig_in;\n\tconnection->int_dig_vv = int_dig_vv;\n\n\tif (strcmp(old_net_conf->integrity_alg, integrity_alg))\n\t\tdrbd_info(connection, \"peer data-integrity-alg: %s\\n\",\n\t\t\t integrity_alg[0] ? 
integrity_alg : \"(none)\");\n\n\tkvfree_rcu(old_net_conf);\n\treturn 0;\n\ndisconnect_rcu_unlock:\n\trcu_read_unlock();\ndisconnect:\n\tcrypto_free_shash(peer_integrity_tfm);\n\tkfree(int_dig_in);\n\tkfree(int_dig_vv);\n\tconn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);\n\treturn -EIO;\n}", "static int helene_x_pon(struct helene_priv *priv)\n{\n\t/* RFIN matching in power save (terrestrial) = ACTIVE */\n\t/* RFIN matching in power save (satellite) = ACTIVE */\n\tu8 dataT[] = { 0x06, 0x00, 0x02, 0x00 };": "static int helene_x_pon(struct helene_priv *priv)\n{\n\t/* RFIN matching in power save (terrestrial) = ACTIVE */\n\t/* RFIN matching in power save (satellite) = ACTIVE */\n\tu8 dataT[] = { 0x06, 0x00, 0x02, 0x00 };\n\t/* SAT_RF_ACTIVE = true, lnaOff = false, terrRfActive = true */\n\tu8 dataS[] = { 0x05, 0x06 };\n\tu8 cdata[] = {0x7A, 0x01};\n\tu8 data[20];\n\tu8 rdata[2];\n\n\t/* mode select */\n\thelene_write_reg(priv, 0x01, 0x00);\n\n\thelene_write_reg(priv, 0x67, dataT[3]);\n\thelene_write_reg(priv, 0x43, dataS[1]);\n\thelene_write_regs(priv, 0x5E, dataT, 3);\n\thelene_write_reg(priv, 0x0C, dataS[0]);\n\n\t/* Initial setting for internal logic block */\n\thelene_write_regs(priv, 0x99, cdata, sizeof(cdata));\n\n\t/* 0x81 - 0x94 */\n\tif (priv->xtal == SONY_HELENE_XTAL_16000)\n\t\tdata[0] = 0x10; /* xtal 16 MHz */\n\telse\n\t\tdata[0] = 0x18; /* xtal 24 MHz */\n\tdata[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */\n\tdata[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */\n\tdata[3] = 0x80; /* REFOUT signal output 500mVpp */\n\tdata[4] = 0x00; /* GPIO settings */\n\tdata[5] = 0x00; /* GPIO settings */\n\tdata[6] = 0xC4; /* Clock enable for internal logic block */\n\tdata[7] = 0x40; /* Start CPU boot-up */\n\tdata[8] = 0x10; /* For burst-write */\n\n\t/* Setting for internal RFAGC */\n\tdata[9] = 0x00;\n\tdata[10] = 0x45;\n\tdata[11] = 0x75;\n\n\tdata[12] = 0x07; /* Setting for analog block */\n\n\t/* Initial setting for 
internal analog block */\n\tdata[13] = 0x1C;\n\tdata[14] = 0x3F;\n\tdata[15] = 0x02;\n\tdata[16] = 0x10;\n\tdata[17] = 0x20;\n\tdata[18] = 0x0A;\n\tdata[19] = 0x00;\n\n\thelene_write_regs(priv, 0x81, data, sizeof(data));\n\n\t/* Setting for internal RFAGC */\n\thelene_write_reg(priv, 0x9B, 0x00);\n\n\tmsleep(20);\n\n\t/* Check CPU_STT/CPU_ERR */\n\thelene_read_regs(priv, 0x1A, rdata, sizeof(rdata));\n\n\tif (rdata[0] != 0x00) {\n\t\tdev_err(&priv->i2c->dev,\n\t\t\t\t\"HELENE tuner CPU error 0x%x\\n\", rdata[0]);\n\t\treturn -EIO;\n\t}\n\n\t/* VCO current setting */\n\tcdata[0] = 0x90;\n\tcdata[1] = 0x06;\n\thelene_write_regs(priv, 0x17, cdata, sizeof(cdata));\n\tmsleep(20);\n\thelene_read_reg(priv, 0x19, data);\n\thelene_write_reg(priv, 0x95, (uint8_t)((data[0] >> 4) & 0x0F));\n\n\t/* Disable IF signal output */\n\thelene_write_reg(priv, 0x74, 0x02);\n\n\t/* Standby setting for CPU */\n\thelene_write_reg(priv, 0x88, 0x00);\n\n\t/* Standby setting for internal logic block */\n\thelene_write_reg(priv, 0x87, 0xC0);\n\n\t/* Load capacitance control setting for crystal oscillator */\n\thelene_write_reg(priv, 0x80, 0x01);\n\n\t/* Satellite initial setting */\n\tcdata[0] = 0x07;\n\tcdata[1] = 0x00;\n\thelene_write_regs(priv, 0x41, cdata, sizeof(cdata));\n\n\tdev_info(&priv->i2c->dev,\n\t\t\t\"HELENE tuner x_pon done\\n\");\n\n\treturn 0;\n}", "static int blinkm_transfer_hw(struct i2c_client *client, int cmd)\n{\n\t/* the protocol is simple but non-standard:\n\t * e.g. cmd 'g' (= 0x67) for \"get device address\"\n\t * - which defaults to 0x09 - would be the sequence:": "static int blinkm_transfer_hw(struct i2c_client *client, int cmd)\n{\n\t/* the protocol is simple but non-standard:\n\t * e.g. cmd 'g' (= 0x67) for \"get device address\"\n\t * - which defaults to 0x09 - would be the sequence:\n\t * a) write 0x67 to the device (byte write)\n\t * b) read the value (0x09) back right after (byte read)\n\t *\n\t * Watch out for \"unfinished\" sequences (i.e. 
not enough reads\n\t * or writes after a command. It will make the blinkM misbehave.\n\t * Sequence is key here.\n\t */\n\n\t/* args / return are in private data struct */\n\tstruct blinkm_data *data = i2c_get_clientdata(client);\n\n\t/* We start hardware transfers which are not to be\n\t * mixed with other commands. Aquire a lock now. */\n\tif (mutex_lock_interruptible(&data->update_lock) < 0)\n\t\treturn -EAGAIN;\n\n\t/* switch cmd - usually write before reads */\n\tswitch (cmd) {\n\tcase BLM_FADE_RAND_RGB:\n\tcase BLM_GO_RGB:\n\tcase BLM_FADE_RGB:\n\t\tdata->args[0] = data->next_red;\n\t\tdata->args[1] = data->next_green;\n\t\tdata->args[2] = data->next_blue;\n\t\tblinkm_write(client, cmd, data->args);\n\t\tdata->red = data->args[0];\n\t\tdata->green = data->args[1];\n\t\tdata->blue = data->args[2];\n\t\tbreak;\n\tcase BLM_FADE_HSB:\n\tcase BLM_FADE_RAND_HSB:\n\t\tdata->args[0] = data->next_hue;\n\t\tdata->args[1] = data->next_saturation;\n\t\tdata->args[2] = data->next_brightness;\n\t\tblinkm_write(client, cmd, data->args);\n\t\tdata->hue = data->next_hue;\n\t\tdata->saturation = data->next_saturation;\n\t\tdata->brightness = data->next_brightness;\n\t\tbreak;\n\tcase BLM_PLAY_SCRIPT:\n\t\tdata->args[0] = data->script_id;\n\t\tdata->args[1] = data->script_repeats;\n\t\tdata->args[2] = data->script_startline;\n\t\tblinkm_write(client, cmd, data->args);\n\t\tbreak;\n\tcase BLM_STOP_SCRIPT:\n\t\tblinkm_write(client, cmd, NULL);\n\t\tbreak;\n\tcase BLM_GET_CUR_RGB:\n\t\tdata->args[0] = data->red;\n\t\tdata->args[1] = data->green;\n\t\tdata->args[2] = data->blue;\n\t\tblinkm_write(client, cmd, NULL);\n\t\tblinkm_read(client, cmd, data->args);\n\t\tdata->red = data->args[0];\n\t\tdata->green = data->args[1];\n\t\tdata->blue = data->args[2];\n\t\tbreak;\n\tcase BLM_GET_ADDR:\n\t\tdata->args[0] = data->i2c_addr;\n\t\tblinkm_write(client, cmd, NULL);\n\t\tblinkm_read(client, cmd, data->args);\n\t\tdata->i2c_addr = data->args[0];\n\t\tbreak;\n\tcase 
BLM_SET_TIME_ADJ:\n\tcase BLM_SET_FADE_SPEED:\n\tcase BLM_READ_SCRIPT_LINE:\n\tcase BLM_WRITE_SCRIPT_LINE:\n\tcase BLM_SET_SCRIPT_LR:\n\tcase BLM_SET_ADDR:\n\tcase BLM_GET_FW_VER:\n\tcase BLM_SET_STARTUP_PARAM:\n\t\tdev_err(&client->dev,\n\t\t\t\t\"BlinkM: cmd %d not implemented yet.\\n\", cmd);\n\t\tbreak;\n\tdefault:\n\t\tdev_err(&client->dev, \"BlinkM: unknown command %d\\n\", cmd);\n\t\tmutex_unlock(&data->update_lock);\n\t\treturn -EINVAL;\n\t}\t\t\t/* end switch(cmd) */\n\n\t/* transfers done, unlock */\n\tmutex_unlock(&data->update_lock);\n\treturn 0;\n}", "static void atmel_pmecc_substitute(struct atmel_pmecc_user *user)\n{\n\tint degree = get_sectorsize(user) == 512 ? 13 : 14;\n\tint cw_len = BIT(degree) - 1;\n\tint strength = get_strength(user);": "static void atmel_pmecc_substitute(struct atmel_pmecc_user *user)\n{\n\tint degree = get_sectorsize(user) == 512 ? 13 : 14;\n\tint cw_len = BIT(degree) - 1;\n\tint strength = get_strength(user);\n\ts16 *alpha_to = user->gf_tables->alpha_to;\n\ts16 *index_of = user->gf_tables->index_of;\n\ts16 *partial_syn = user->partial_syn;\n\ts16 *si;\n\tint i, j;\n\n\t/*\n\t * si[] is a table that holds the current syndrome value,\n\t * an element of that table belongs to the field\n\t */\n\tsi = user->si;\n\n\tmemset(&si[1], 0, sizeof(s16) * ((2 * strength) - 1));\n\n\t/* Computation 2t syndromes based on S(x) */\n\t/* Odd syndromes */\n\tfor (i = 1; i < 2 * strength; i += 2) {\n\t\tfor (j = 0; j < degree; j++) {\n\t\t\tif (partial_syn[i] & BIT(j))\n\t\t\t\tsi[i] = alpha_to[i * j] ^ si[i];\n\t\t}\n\t}\n\t/* Even syndrome = (Odd syndrome) ** 2 */\n\tfor (i = 2, j = 1; j <= strength; i = ++j << 1) {\n\t\tif (si[j] == 0) {\n\t\t\tsi[i] = 0;\n\t\t} else {\n\t\t\ts16 tmp;\n\n\t\t\ttmp = index_of[si[j]];\n\t\t\ttmp = (tmp * 2) % cw_len;\n\t\t\tsi[i] = alpha_to[tmp];\n\t\t}\n\t}\n}", "static void cs8409_fix_caps(struct hda_codec *codec, unsigned int nid)\n{\n\tint caps;\n\n\t/* CS8409 is simple HDA bridge and intended to be used 
with a remote": "static void cs8409_fix_caps(struct hda_codec *codec, unsigned int nid)\n{\n\tint caps;\n\n\t/* CS8409 is simple HDA bridge and intended to be used with a remote\n\t * companion codec. Most of input/output PIN(s) have only basic\n\t * capabilities. Receive and Transmit NID(s) have only OUTC and INC\n\t * capabilities and no presence detect capable (PDC) and call to\n\t * snd_hda_gen_build_controls() will mark them as non detectable\n\t * phantom jacks. However, a companion codec may be\n\t * connected to these pins which supports jack detect\n\t * capabilities. We have to override pin capabilities,\n\t * otherwise they will not be created as input devices.\n\t */\n\tcaps = snd_hdac_read_parm(&codec->core, nid, AC_PAR_PIN_CAP);\n\tif (caps >= 0)\n\t\tsnd_hdac_override_parm(&codec->core, nid, AC_PAR_PIN_CAP,\n\t\t\t\t (caps | (AC_PINCAP_IMP_SENSE | AC_PINCAP_PRES_DETECT)));\n\n\tsnd_hda_override_wcaps(codec, nid, (get_wcaps(codec, nid) | AC_WCAP_UNSOL_CAP));\n}", "static int qam_flip_spec(struct drx_demod_instance *demod, struct drx_channel *channel)\n{\n\tstruct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;\n\tstruct drxj_data *ext_attr = demod->my_ext_attr;\n\tint rc;": "static int qam_flip_spec(struct drx_demod_instance *demod, struct drx_channel *channel)\n{\n\tstruct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;\n\tstruct drxj_data *ext_attr = demod->my_ext_attr;\n\tint rc;\n\tu32 iqm_fs_rate_ofs = 0;\n\tu32 iqm_fs_rate_lo = 0;\n\tu16 qam_ctl_ena = 0;\n\tu16 data = 0;\n\tu16 equ_mode = 0;\n\tu16 fsm_state = 0;\n\tint i = 0;\n\tint ofsofs = 0;\n\n\t/* Silence the controlling of lc, equ, and the acquisition state machine */\n\trc = drxj_dap_read_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, &qam_ctl_ena, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, qam_ctl_ena & ~(SCU_RAM_QAM_CTL_ENA_ACQ__M | SCU_RAM_QAM_CTL_ENA_EQU__M | 
SCU_RAM_QAM_CTL_ENA_LC__M), 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* freeze the frequency control loop */\n\trc = drxj_dap_write_reg16(dev_addr, QAM_LC_CF__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, QAM_LC_CF1__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_atomic_read_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, &iqm_fs_rate_ofs, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_atomic_read_reg32(dev_addr, IQM_FS_RATE_LO__A, &iqm_fs_rate_lo, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\tofsofs = iqm_fs_rate_lo - iqm_fs_rate_ofs;\n\tiqm_fs_rate_ofs = ~iqm_fs_rate_ofs + 1;\n\tiqm_fs_rate_ofs -= 2 * ofsofs;\n\n\t/* freeze dq/fq updating */\n\trc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\tdata = (data & 0xfff9);\n\trc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* lc_cp / _ci / _ca */\n\trc = drxj_dap_write_reg16(dev_addr, QAM_LC_CI__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, QAM_LC_EP__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, QAM_FQ_LA_FACTOR__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* flip the spec */\n\trc = drxdap_fasi_write_reg32(dev_addr, IQM_FS_RATE_OFS_LO__A, iqm_fs_rate_ofs, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto 
rw_error;\n\t}\n\text_attr->iqm_fs_rate_ofs = iqm_fs_rate_ofs;\n\text_attr->pos_image = (ext_attr->pos_image) ? false : true;\n\n\t/* freeze dq/fq updating */\n\trc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\tequ_mode = data;\n\tdata = (data & 0xfff9);\n\trc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\tfor (i = 0; i < 28; i++) {\n\t\trc = drxj_dap_read_reg16(dev_addr, QAM_DQ_TAP_IM_EL0__A + (2 * i), &data, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t\trc = drxj_dap_write_reg16(dev_addr, QAM_DQ_TAP_IM_EL0__A + (2 * i), -data, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t}\n\n\tfor (i = 0; i < 24; i++) {\n\t\trc = drxj_dap_read_reg16(dev_addr, QAM_FQ_TAP_IM_EL0__A + (2 * i), &data, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t\trc = drxj_dap_write_reg16(dev_addr, QAM_FQ_TAP_IM_EL0__A + (2 * i), -data, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t}\n\n\tdata = equ_mode;\n\trc = drxj_dap_write_reg16(dev_addr, QAM_DQ_MODE__A, data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, QAM_FQ_MODE__A, data, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_FSM_STATE_TGT__A, 4, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\ti = 0;\n\twhile ((fsm_state != 4) && (i++ < 100)) {\n\t\trc = drxj_dap_read_reg16(dev_addr, SCU_RAM_QAM_FSM_STATE__A, &fsm_state, 0);\n\t\tif (rc != 0) 
{\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, SCU_RAM_QAM_CTL_ENA__A, (qam_ctl_ena | 0x0016), 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\treturn 0;\nrw_error:\n\treturn rc;\n\n}", "static void ca0132_alt_dsp_initial_mic_setup(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\tunsigned int tmp;\n": "static void ca0132_alt_dsp_initial_mic_setup(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\tunsigned int tmp;\n\n\tchipio_set_stream_control(codec, 0x03, 0);\n\tchipio_set_stream_control(codec, 0x04, 0);\n\n\tchipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);\n\tchipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);\n\n\ttmp = FLOAT_THREE;\n\tdspio_set_uint_param(codec, 0x80, 0x00, tmp);\n\n\tchipio_set_stream_control(codec, 0x03, 1);\n\tchipio_set_stream_control(codec, 0x04, 1);\n\n\tswitch (ca0132_quirk(spec)) {\n\tcase QUIRK_SBZ:\n\t\tchipio_write(codec, 0x18b098, 0x0000000c);\n\t\tchipio_write(codec, 0x18b09C, 0x0000000c);\n\t\tbreak;\n\tcase QUIRK_AE5:\n\t\tchipio_write(codec, 0x18b098, 0x0000000c);\n\t\tchipio_write(codec, 0x18b09c, 0x0000004c);\n\t\tbreak;\n\tdefault:\n\t\tbreak;\n\t}\n}", "static int pvr2_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin)\n{\n\t/* pkt: FIXME: We are returning one \"fake\" input here\n\t which could very well be called \"whatever_we_like\".\n\t This is for apps that want to see an audio input": "static int pvr2_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin)\n{\n\t/* pkt: FIXME: We are returning one \"fake\" input here\n\t which could very well be called \"whatever_we_like\".\n\t This is for apps that want to see an audio input\n\t just to feel comfortable, as well as to test if\n\t it can do stereo or sth. 
There is actually no guarantee\n\t that the actual audio input cannot change behind the app's\n\t back, but most applications should not mind that either.\n\n\t Hopefully, mplayer people will work with us on this (this\n\t whole mess is to support mplayer pvr://), or Hans will come\n\t up with a more standard way to say \"we have inputs but we\n\t don 't want you to change them independent of video\" which\n\t will sort this mess.\n\t */\n\n\tif (vin->index > 0)\n\t\treturn -EINVAL;\n\tstrscpy(vin->name, \"PVRUSB2 Audio\", sizeof(vin->name));\n\tvin->capability = V4L2_AUDCAP_STEREO;\n\treturn 0;\n}", "static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)\n{\n\tstruct mxic_ecc_engine *mxic = nand_to_mxic(nand);\n\tstruct nand_ecc_props *conf = &nand->ecc.ctx.conf;\n\tstruct nand_ecc_props *reqs = &nand->ecc.requirements;": "static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)\n{\n\tstruct mxic_ecc_engine *mxic = nand_to_mxic(nand);\n\tstruct nand_ecc_props *conf = &nand->ecc.ctx.conf;\n\tstruct nand_ecc_props *reqs = &nand->ecc.requirements;\n\tstruct nand_ecc_props *user = &nand->ecc.user_conf;\n\tstruct mtd_info *mtd = nanddev_to_mtd(nand);\n\tint step_size = 0, strength = 0, desired_correction = 0, steps, idx;\n\tstatic const int possible_strength[] = {4, 8, 40, 48};\n\tstatic const int spare_size[] = {32, 32, 96, 96};\n\tstruct mxic_ecc_ctx *ctx;\n\tu32 spare_reg;\n\tint ret;\n\n\tctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);\n\tif (!ctx)\n\t\treturn -ENOMEM;\n\n\tnand->ecc.ctx.priv = ctx;\n\n\t/* Only large page NAND chips may use BCH */\n\tif (mtd->oobsize < 64) {\n\t\tpr_err(\"BCH cannot be used with small page NAND chips\\n\");\n\t\treturn -EINVAL;\n\t}\n\n\tmtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);\n\n\t/* Enable all status bits */\n\twritel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |\n\t TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);\n\n\t/* Configure the correction depending on the NAND device 
topology */\n\tif (user->step_size && user->strength) {\n\t\tstep_size = user->step_size;\n\t\tstrength = user->strength;\n\t} else if (reqs->step_size && reqs->strength) {\n\t\tstep_size = reqs->step_size;\n\t\tstrength = reqs->strength;\n\t}\n\n\tif (step_size && strength) {\n\t\tsteps = mtd->writesize / step_size;\n\t\tdesired_correction = steps * strength;\n\t}\n\n\t/* Step size is fixed to 1kiB, strength may vary (4 possible values) */\n\tconf->step_size = SZ_1K;\n\tsteps = mtd->writesize / conf->step_size;\n\n\tctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);\n\tif (!ctx->status)\n\t\treturn -ENOMEM;\n\n\tif (desired_correction) {\n\t\tstrength = desired_correction / steps;\n\n\t\tfor (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)\n\t\t\tif (possible_strength[idx] >= strength)\n\t\t\t\tbreak;\n\n\t\tidx = min_t(unsigned int, idx,\n\t\t\t ARRAY_SIZE(possible_strength) - 1);\n\t} else {\n\t\t/* Missing data, maximize the correction */\n\t\tidx = ARRAY_SIZE(possible_strength) - 1;\n\t}\n\n\t/* Tune the selected strength until it fits in the OOB area */\n\tfor (; idx >= 0; idx--) {\n\t\tif (spare_size[idx] * steps <= mtd->oobsize)\n\t\t\tbreak;\n\t}\n\n\t/* This engine cannot be used with this NAND device */\n\tif (idx < 0)\n\t\treturn -EINVAL;\n\n\t/* Configure the engine for the desired strength */\n\twritel(ECC_TYP(idx), mxic->regs + DP_CONFIG);\n\tconf->strength = possible_strength[idx];\n\tspare_reg = readl(mxic->regs + SPARE_SIZE);\n\n\tctx->steps = steps;\n\tctx->data_step_sz = mtd->writesize / steps;\n\tctx->oob_step_sz = mtd->oobsize / steps;\n\tctx->parity_sz = PARITY_SZ(spare_reg);\n\tctx->meta_sz = META_SZ(spare_reg);\n\n\t/* Ensure buffers will contain enough bytes to store the STAT_BYTES */\n\tctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +\n\t\t\t\t\t(ctx->steps * STAT_BYTES);\n\tret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);\n\tif (ret)\n\t\treturn ret;\n\n\tctx->oobwithstat = kmalloc(mtd->oobsize + 
(ctx->steps * STAT_BYTES),\n\t\t\t\t GFP_KERNEL);\n\tif (!ctx->oobwithstat) {\n\t\tret = -ENOMEM;\n\t\tgoto cleanup_req_tweak;\n\t}\n\n\tsg_init_table(ctx->sg, 2);\n\n\t/* Configuration dump and sanity checks */\n\tdev_err(dev, \"DPE version number: %d\\n\",\n\t\treadl(mxic->regs + DP_VER) >> DP_VER_OFFSET);\n\tdev_err(dev, \"Chunk size: %d\\n\", readl(mxic->regs + CHUNK_SIZE));\n\tdev_err(dev, \"Main size: %d\\n\", readl(mxic->regs + MAIN_SIZE));\n\tdev_err(dev, \"Spare size: %d\\n\", SPARE_SZ(spare_reg));\n\tdev_err(dev, \"Rsv size: %ld\\n\", RSV_SZ(spare_reg));\n\tdev_err(dev, \"Parity size: %d\\n\", ctx->parity_sz);\n\tdev_err(dev, \"Meta size: %d\\n\", ctx->meta_sz);\n\n\tif ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=\n\t SPARE_SZ(spare_reg)) {\n\t\tdev_err(dev, \"Wrong OOB configuration: %d + %d + %ld != %d\\n\",\n\t\t\tctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),\n\t\t\tSPARE_SZ(spare_reg));\n\t\tret = -EINVAL;\n\t\tgoto free_oobwithstat;\n\t}\n\n\tif (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {\n\t\tdev_err(dev, \"Wrong OOB configuration: %d != %d\\n\",\n\t\t\tctx->oob_step_sz, SPARE_SZ(spare_reg));\n\t\tret = -EINVAL;\n\t\tgoto free_oobwithstat;\n\t}\n\n\treturn 0;\n\nfree_oobwithstat:\n\tkfree(ctx->oobwithstat);\ncleanup_req_tweak:\n\tnand_ecc_cleanup_req_tweaking(&ctx->req_ctx);\n\n\treturn ret;\n}", "static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)\n{\n\trxq->pit_size_cnt = DPMAIF_PIT_COUNT;\n\trxq->pit_rd_idx = 0;\n\trxq->pit_wr_idx = 0;": "static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)\n{\n\trxq->pit_size_cnt = DPMAIF_PIT_COUNT;\n\trxq->pit_rd_idx = 0;\n\trxq->pit_wr_idx = 0;\n\trxq->pit_release_rd_idx = 0;\n\trxq->expect_pit_seq = 0;\n\trxq->pit_remain_release_cnt = 0;\n\tmemset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));\n\n\trxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,\n\t\t\t\t\t rxq->pit_size_cnt * sizeof(struct dpmaif_pit),\n\t\t\t\t\t &rxq->pit_bus_addr, GFP_KERNEL | 
__GFP_ZERO);\n\tif (!rxq->pit_base)\n\t\treturn -ENOMEM;\n\n\trxq->bat_req = &rxq->dpmaif_ctrl->bat_req;\n\tatomic_inc(&rxq->bat_req->refcnt);\n\n\trxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;\n\tatomic_inc(&rxq->bat_frag->refcnt);\n\treturn 0;\n}", "static int get_device_capabilities(struct drx_demod_instance *demod)\n{\n\tstruct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL);\n\tstruct drxj_data *ext_attr = (struct drxj_data *) NULL;\n\tstruct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);": "static int get_device_capabilities(struct drx_demod_instance *demod)\n{\n\tstruct drx_common_attr *common_attr = (struct drx_common_attr *) (NULL);\n\tstruct drxj_data *ext_attr = (struct drxj_data *) NULL;\n\tstruct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);\n\tu16 sio_pdr_ohw_cfg = 0;\n\tu32 sio_top_jtagid_lo = 0;\n\tu16 bid = 0;\n\tint rc;\n\n\tcommon_attr = (struct drx_common_attr *) demod->my_common_attr;\n\text_attr = (struct drxj_data *) demod->my_ext_attr;\n\tdev_addr = demod->my_i2c_dev_addr;\n\n\trc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_read_reg16(dev_addr, SIO_PDR_OHW_CFG__A, &sio_pdr_ohw_cfg, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY__PRE, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\tswitch ((sio_pdr_ohw_cfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) {\n\tcase 0:\n\t\t/* ignore (bypass ?) 
*/\n\t\tbreak;\n\tcase 1:\n\t\t/* 27 MHz */\n\t\tcommon_attr->osc_clock_freq = 27000;\n\t\tbreak;\n\tcase 2:\n\t\t/* 20.25 MHz */\n\t\tcommon_attr->osc_clock_freq = 20250;\n\t\tbreak;\n\tcase 3:\n\t\t/* 4 MHz */\n\t\tcommon_attr->osc_clock_freq = 4000;\n\t\tbreak;\n\tdefault:\n\t\treturn -EIO;\n\t}\n\n\t/*\n\t Determine device capabilities\n\t Based on pinning v47\n\t */\n\trc = drxdap_fasi_read_reg32(dev_addr, SIO_TOP_JTAGID_LO__A, &sio_top_jtagid_lo, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\text_attr->mfx = (u8) ((sio_top_jtagid_lo >> 29) & 0xF);\n\n\tswitch ((sio_top_jtagid_lo >> 12) & 0xFF) {\n\tcase 0x31:\n\t\trc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t\trc = drxj_dap_read_reg16(dev_addr, SIO_PDR_UIO_IN_HI__A, &bid, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t\tbid = (bid >> 10) & 0xf;\n\t\trc = drxj_dap_write_reg16(dev_addr, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY__PRE, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\n\t\text_attr->has_lna = true;\n\t\text_attr->has_ntsc = false;\n\t\text_attr->has_btsc = false;\n\t\text_attr->has_oob = false;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = false;\n\t\text_attr->has_gpio = false;\n\t\text_attr->has_irqn = false;\n\t\tbreak;\n\tcase 0x33:\n\t\text_attr->has_lna = false;\n\t\text_attr->has_ntsc = false;\n\t\text_attr->has_btsc = false;\n\t\text_attr->has_oob = false;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = false;\n\t\text_attr->has_gpio = false;\n\t\text_attr->has_irqn = false;\n\t\tbreak;\n\tcase 0x45:\n\t\text_attr->has_lna = true;\n\t\text_attr->has_ntsc = true;\n\t\text_attr->has_btsc = false;\n\t\text_attr->has_oob = false;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = 
true;\n\t\text_attr->has_irqn = false;\n\t\tbreak;\n\tcase 0x46:\n\t\text_attr->has_lna = false;\n\t\text_attr->has_ntsc = true;\n\t\text_attr->has_btsc = false;\n\t\text_attr->has_oob = false;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = true;\n\t\text_attr->has_irqn = false;\n\t\tbreak;\n\tcase 0x41:\n\t\text_attr->has_lna = true;\n\t\text_attr->has_ntsc = true;\n\t\text_attr->has_btsc = true;\n\t\text_attr->has_oob = false;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = true;\n\t\text_attr->has_irqn = false;\n\t\tbreak;\n\tcase 0x43:\n\t\text_attr->has_lna = false;\n\t\text_attr->has_ntsc = true;\n\t\text_attr->has_btsc = true;\n\t\text_attr->has_oob = false;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = true;\n\t\text_attr->has_irqn = false;\n\t\tbreak;\n\tcase 0x32:\n\t\text_attr->has_lna = true;\n\t\text_attr->has_ntsc = false;\n\t\text_attr->has_btsc = false;\n\t\text_attr->has_oob = true;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = true;\n\t\text_attr->has_irqn = true;\n\t\tbreak;\n\tcase 0x34:\n\t\text_attr->has_lna = false;\n\t\text_attr->has_ntsc = true;\n\t\text_attr->has_btsc = true;\n\t\text_attr->has_oob = true;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = true;\n\t\text_attr->has_irqn = true;\n\t\tbreak;\n\tcase 0x42:\n\t\text_attr->has_lna = true;\n\t\text_attr->has_ntsc = true;\n\t\text_attr->has_btsc = true;\n\t\text_attr->has_oob = true;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = true;\n\t\text_attr->has_irqn = true;\n\t\tbreak;\n\tcase 0x44:\n\t\text_attr->has_lna = false;\n\t\text_attr->has_ntsc = true;\n\t\text_attr->has_btsc = true;\n\t\text_attr->has_oob = true;\n\t\text_attr->has_smatx = true;\n\t\text_attr->has_smarx = true;\n\t\text_attr->has_gpio = 
true;\n\t\text_attr->has_irqn = true;\n\t\tbreak;\n\tdefault:\n\t\t/* Unknown device variant */\n\t\treturn -EIO;\n\t\tbreak;\n\t}\n\n\treturn 0;\nrw_error:\n\treturn rc;\n}", "static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)\n{\n\tstruct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);\n\tunsigned int srccpu = csd->node.src;\n\tstruct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);": "static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)\n{\n\tstruct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);\n\tunsigned int srccpu = csd->node.src;\n\tstruct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);\n\tstruct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);\n\tunsigned int now;\n\tunion cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];\n\tunsigned int n_data = 0, i;\n\n\tdata[0].val = READ_ONCE(cfd_seq);\n\tnow = data[0].u.cnt;\n\n\tcfd_seq_data_add(pcpu->seq_queue,\t\t\tsrccpu, cpu,\t CFD_SEQ_QUEUE, data, &n_data, now);\n\tcfd_seq_data_add(pcpu->seq_ipi,\t\t\t\tsrccpu, cpu,\t CFD_SEQ_IPI, data, &n_data, now);\n\tcfd_seq_data_add(pcpu->seq_noipi,\t\t\tsrccpu, cpu,\t CFD_SEQ_NOIPI, data, &n_data, now);\n\n\tcfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu),\tsrccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);\n\tcfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);\n\n\tcfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);\n\tcfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);\n\tcfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);\n\tcfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);\n\tcfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);\n\n\tfor (i = 0; i < n_data; i++) {\n\t\tpr_alert(\"\\tcsd: cnt(%07x): %04x->%04x %s\\n\",\n\t\t\t data[i].u.cnt & ~0x80000000U, 
data[i].u.src,\n\t\t\t data[i].u.dst, csd_lock_get_type(data[i].u.type));\n\t}\n\tpr_alert(\"\\tcsd: cnt now: %07x\\n\", now);\n}", "static void rxrpc_set_rto(struct rxrpc_peer *peer)\n{\n\tu32 rto;\n\n\t/* 1. If rtt variance happened to be less 50msec, it is hallucination.": "static void rxrpc_set_rto(struct rxrpc_peer *peer)\n{\n\tu32 rto;\n\n\t/* 1. If rtt variance happened to be less 50msec, it is hallucination.\n\t * It cannot be less due to utterly erratic ACK generation made\n\t * at least by solaris and freebsd. \"Erratic ACKs\" has _nothing_\n\t * to do with delayed acks, because at cwnd>2 true delack timeout\n\t * is invisible. Actually, Linux-2.4 also generates erratic\n\t * ACKs in some circumstances.\n\t */\n\trto = __rxrpc_set_rto(peer);\n\n\t/* 2. Fixups made earlier cannot be right.\n\t * If we do not estimate RTO correctly without them,\n\t * all the algo is pure shit and should be replaced\n\t * with correct one. It is exactly, which we pretend to do.\n\t */\n\n\t/* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo\n\t * guarantees that rto is higher.\n\t */\n\tpeer->rto_j = rxrpc_bound_rto(rto);\n}", "static void cs8409_cs42l42_jack_unsol_event(struct hda_codec *codec, unsigned int res)\n{\n\tstruct cs8409_spec *spec = codec->spec;\n\tstruct sub_codec *cs42l42 = spec->scodecs[CS8409_CODEC0];\n\tstruct hda_jack_tbl *jk;": "static void cs8409_cs42l42_jack_unsol_event(struct hda_codec *codec, unsigned int res)\n{\n\tstruct cs8409_spec *spec = codec->spec;\n\tstruct sub_codec *cs42l42 = spec->scodecs[CS8409_CODEC0];\n\tstruct hda_jack_tbl *jk;\n\n\t/* jack_unsol_event() will be called every time gpio line changing state.\n\t * In this case gpio4 line goes up as a result of reading interrupt status\n\t * registers in previous cs8409_jack_unsol_event() call.\n\t * We don't need to handle this event, ignoring...\n\t */\n\tif (res & cs42l42->irq_mask)\n\t\treturn;\n\n\tif (cs42l42_jack_unsol_event(cs42l42)) {\n\t\tsnd_hda_set_pin_ctl(codec, 
CS8409_CS42L42_SPK_PIN_NID,\n\t\t\t\t cs42l42->hp_jack_in ? 0 : PIN_OUT);\n\t\t/* Report jack*/\n\t\tjk = snd_hda_jack_tbl_get_mst(codec, CS8409_CS42L42_HP_PIN_NID, 0);\n\t\tif (jk)\n\t\t\tsnd_hda_jack_unsol_event(codec, (jk->tag << AC_UNSOL_RES_TAG_SHIFT) &\n\t\t\t\t\t\t\tAC_UNSOL_RES_TAG);\n\t\t/* Report jack*/\n\t\tjk = snd_hda_jack_tbl_get_mst(codec, CS8409_CS42L42_AMIC_PIN_NID, 0);\n\t\tif (jk)\n\t\t\tsnd_hda_jack_unsol_event(codec, (jk->tag << AC_UNSOL_RES_TAG_SHIFT) &\n\t\t\t\t\t\t\t AC_UNSOL_RES_TAG);\n\t}\n}", "static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)\n{\n\tvoid *augmented_args = NULL;\n\t/*\n\t * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter": "static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)\n{\n\tvoid *augmented_args = NULL;\n\t/*\n\t * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter\n\t * and there we get all 6 syscall args plus the tracepoint common fields\n\t * that gets calculated at the start and the syscall_nr (another long).\n\t * So we check if that is the case and if so don't look after the\n\t * sc->args_size but always after the full raw_syscalls:sys_enter payload,\n\t * which is fixed.\n\t *\n\t * We'll revisit this later to pass s->args_size to the BPF augmenter\n\t * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it\n\t * copies only what we need for each syscall, like what happens when we\n\t * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace\n\t * traffic to just what is needed for each syscall.\n\t */\n\tint args_size = raw_augmented_args_size ?: sc->args_size;\n\n\t*augmented_args_size = sample->raw_size - args_size;\n\tif (*augmented_args_size > 0)\n\t\taugmented_args = sample->raw_data + args_size;\n\n\treturn augmented_args;\n}", "static void 
ca0132_alt_init_analog_mics(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\tunsigned int tmp;\n": "static void ca0132_alt_init_analog_mics(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\tunsigned int tmp;\n\n\t/* Mic 1 Setup */\n\tchipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);\n\tchipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);\n\tif (ca0132_quirk(spec) == QUIRK_R3DI) {\n\t\tchipio_set_conn_rate(codec, 0x0F, SR_96_000);\n\t\ttmp = FLOAT_ONE;\n\t} else\n\t\ttmp = FLOAT_THREE;\n\tdspio_set_uint_param(codec, 0x80, 0x00, tmp);\n\n\t/* Mic 2 setup (not present on desktop cards) */\n\tchipio_set_conn_rate(codec, MEM_CONNID_MICIN2, SR_96_000);\n\tchipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, SR_96_000);\n\tif (ca0132_quirk(spec) == QUIRK_R3DI)\n\t\tchipio_set_conn_rate(codec, 0x0F, SR_96_000);\n\ttmp = FLOAT_ZERO;\n\tdspio_set_uint_param(codec, 0x80, 0x01, tmp);\n}", "static void tcp_set_rto(struct sock *sk)\n{\n\tconst struct tcp_sock *tp = tcp_sk(sk);\n\t/* Old crap is replaced with new one. 8)\n\t *": "static void tcp_set_rto(struct sock *sk)\n{\n\tconst struct tcp_sock *tp = tcp_sk(sk);\n\t/* Old crap is replaced with new one. 8)\n\t *\n\t * More seriously:\n\t * 1. If rtt variance happened to be less 50msec, it is hallucination.\n\t * It cannot be less due to utterly erratic ACK generation made\n\t * at least by solaris and freebsd. \"Erratic ACKs\" has _nothing_\n\t * to do with delayed acks, because at cwnd>2 true delack timeout\n\t * is invisible. Actually, Linux-2.4 also generates erratic\n\t * ACKs in some circumstances.\n\t */\n\tinet_csk(sk)->icsk_rto = __tcp_set_rto(tp);\n\n\t/* 2. Fixups made earlier cannot be right.\n\t * If we do not estimate RTO correctly without them,\n\t * all the algo is pure shit and should be replaced\n\t * with correct one. 
It is exactly, which we pretend to do.\n\t */\n\n\t/* NOTE: clamping at TCP_RTO_MIN is not required, current algo\n\t * guarantees that rto is higher.\n\t */\n\ttcp_bound_rto(sk);\n}", "static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)\n{\n\tint i,j;\n\tsize_t res;\n\tunsigned long long breaks, needed;": "static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)\n{\n\tint i,j;\n\tsize_t res;\n\tunsigned long long breaks, needed;\n\tint readint;\n\tint readintarraybig[2*DAWR_LENGTH_MAX/sizeof(int)];\n\tint *readintalign;\n\tvolatile int *ptr;\n\tint break_fd;\n\tint loop_num = MAX_LOOPS - (rand() % 100); /* provide some variability */\n\tvolatile int *k;\n\t__u64 len;\n\n\t/* align to 0x400 boundary as required by DAWR */\n\treadintalign = (int *)(((unsigned long)readintarraybig + 0x7ff) &\n\t\t\t 0xfffffffffffff800);\n\n\tptr = &readint;\n\tif (arraytest)\n\t\tptr = &readintalign[0];\n\n\tlen = arraytest ? DAWR_LENGTH_MAX : sizeof(int);\n\tbreak_fd = perf_process_event_open_exclude_user(readwriteflag, (__u64)ptr,\n\t\t\t\t\t\t\tlen, exclude_user);\n\tif (break_fd < 0) {\n\t\tperror(\"perf_process_event_open_exclude_user\");\n\t\texit(1);\n\t}\n\n\t/* start counters */\n\tioctl(break_fd, PERF_EVENT_IOC_ENABLE);\n\n\t/* Test a bunch of reads and writes */\n\tk = &readint;\n\tfor (i = 0; i < loop_num; i++) {\n\t\tif (arraytest)\n\t\t\tk = &(readintalign[i % (DAWR_LENGTH_MAX/sizeof(int))]);\n\n\t\tj = *k;\n\t\t*k = j;\n\t}\n\n\t/* stop counters */\n\tioctl(break_fd, PERF_EVENT_IOC_DISABLE);\n\n\t/* read and check counters */\n\tres = read(break_fd, &breaks, sizeof(unsigned long long));\n\tassert(res == sizeof(unsigned long long));\n\t/* we read and write each loop, so subtract the ones we are counting */\n\tneeded = 0;\n\tif (readwriteflag & HW_BREAKPOINT_R)\n\t\tneeded += loop_num;\n\tif (readwriteflag & HW_BREAKPOINT_W)\n\t\tneeded += loop_num;\n\tneeded = needed * (1 - exclude_user);\n\tprintf(\"TESTED: addr:0x%lx brks:% 
8lld loops:% 8i rw:%i !user:%i array:%i\\n\",\n\t (unsigned long int)ptr, breaks, loop_num, readwriteflag, exclude_user, arraytest);\n\tif (breaks != needed) {\n\t\tprintf(\"FAILED: 0x%lx brks:%lld needed:%lli %i %i %i\\n\\n\",\n\t\t (unsigned long int)ptr, breaks, needed, loop_num, readwriteflag, exclude_user);\n\t\treturn 1;\n\t}\n\tclose(break_fd);\n\n\treturn 0;\n}", "static bool rt1316_readable_register(struct device *dev, unsigned int reg)\n{\n\tswitch (reg) {\n\tcase 0x2f0a:\n\tcase 0x2f36:": "static bool rt1316_readable_register(struct device *dev, unsigned int reg)\n{\n\tswitch (reg) {\n\tcase 0x2f0a:\n\tcase 0x2f36:\n\tcase 0x3203 ... 0x320e:\n\tcase 0xc000 ... 0xc7b4:\n\tcase 0xcf00 ... 0xcf03:\n\tcase 0xd101 ... 0xd103:\n\tcase SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1316_SDCA_ENT_UDMPU21, RT1316_SDCA_CTL_UDMPU_CLUSTER, 0):\n\tcase SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1316_SDCA_ENT_FU21, RT1316_SDCA_CTL_FU_MUTE, CH_L):\n\tcase SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1316_SDCA_ENT_FU21, RT1316_SDCA_CTL_FU_MUTE, CH_R):\n\tcase SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1316_SDCA_ENT_PDE23, RT1316_SDCA_CTL_REQ_POWER_STATE, 0):\n\tcase SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1316_SDCA_ENT_PDE27, RT1316_SDCA_CTL_REQ_POWER_STATE, 0):\n\tcase SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1316_SDCA_ENT_PDE22, RT1316_SDCA_CTL_REQ_POWER_STATE, 0):\n\tcase SDW_SDCA_CTL(FUNC_NUM_SMART_AMP, RT1316_SDCA_ENT_PDE24, RT1316_SDCA_CTL_REQ_POWER_STATE, 0):\n\t\treturn true;\n\tdefault:\n\t\treturn false;\n\t}\n}", "static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)\n{\n\t/*\n\t * Order state is used to detect strong link cycles, but only for BTF\n\t * kinds that are or could be an independent definition (i.e.,": "static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)\n{\n\t/*\n\t * Order state is used to detect strong link cycles, but only for BTF\n\t * kinds that are or could be an independent definition (i.e.,\n\t * stand-alone fwd decl, enum, typedef, 
struct, union). Ptrs, arrays,\n\t * func_protos, modifiers are just means to get to these definitions.\n\t * Int/void don't need definitions, they are assumed to be always\n\t * properly defined. We also ignore datasec, var, and funcs for now.\n\t * So for all non-defining kinds, we never even set ordering state,\n\t * for defining kinds we set ORDERING and subsequently ORDERED if it\n\t * forms a strong link.\n\t */\n\tstruct btf_dump_type_aux_state *tstate = &d->type_states[id];\n\tconst struct btf_type *t;\n\t__u16 vlen;\n\tint err, i;\n\n\t/* return true, letting typedefs know that it's ok to be emitted */\n\tif (tstate->order_state == ORDERED)\n\t\treturn 1;\n\n\tt = btf__type_by_id(d->btf, id);\n\n\tif (tstate->order_state == ORDERING) {\n\t\t/* type loop, but resolvable through fwd declaration */\n\t\tif (btf_is_composite(t) && through_ptr && t->name_off != 0)\n\t\t\treturn 0;\n\t\tpr_warn(\"unsatisfiable type cycle, id:[%u]\\n\", id);\n\t\treturn -ELOOP;\n\t}\n\n\tswitch (btf_kind(t)) {\n\tcase BTF_KIND_INT:\n\tcase BTF_KIND_FLOAT:\n\t\ttstate->order_state = ORDERED;\n\t\treturn 0;\n\n\tcase BTF_KIND_PTR:\n\t\terr = btf_dump_order_type(d, t->type, true);\n\t\ttstate->order_state = ORDERED;\n\t\treturn err;\n\n\tcase BTF_KIND_ARRAY:\n\t\treturn btf_dump_order_type(d, btf_array(t)->type, false);\n\n\tcase BTF_KIND_STRUCT:\n\tcase BTF_KIND_UNION: {\n\t\tconst struct btf_member *m = btf_members(t);\n\t\t/*\n\t\t * struct/union is part of strong link, only if it's embedded\n\t\t * (so no ptr in a path) or it's anonymous (so has to be\n\t\t * defined inline, even if declared through ptr)\n\t\t */\n\t\tif (through_ptr && t->name_off != 0)\n\t\t\treturn 0;\n\n\t\ttstate->order_state = ORDERING;\n\n\t\tvlen = btf_vlen(t);\n\t\tfor (i = 0; i < vlen; i++, m++) {\n\t\t\terr = btf_dump_order_type(d, m->type, false);\n\t\t\tif (err < 0)\n\t\t\t\treturn err;\n\t\t}\n\n\t\tif (t->name_off != 0) {\n\t\t\terr = btf_dump_add_emit_queue_id(d, id);\n\t\t\tif (err < 
0)\n\t\t\t\treturn err;\n\t\t}\n\n\t\ttstate->order_state = ORDERED;\n\t\treturn 1;\n\t}\n\tcase BTF_KIND_ENUM:\n\tcase BTF_KIND_FWD:\n\t\t/*\n\t\t * non-anonymous or non-referenced enums are top-level\n\t\t * declarations and should be emitted. Same logic can be\n\t\t * applied to FWDs, it won't hurt anyways.\n\t\t */\n\t\tif (t->name_off != 0 || !tstate->referenced) {\n\t\t\terr = btf_dump_add_emit_queue_id(d, id);\n\t\t\tif (err)\n\t\t\t\treturn err;\n\t\t}\n\t\ttstate->order_state = ORDERED;\n\t\treturn 1;\n\n\tcase BTF_KIND_TYPEDEF: {\n\t\tint is_strong;\n\n\t\tis_strong = btf_dump_order_type(d, t->type, through_ptr);\n\t\tif (is_strong < 0)\n\t\t\treturn is_strong;\n\n\t\t/* typedef is similar to struct/union w.r.t. fwd-decls */\n\t\tif (through_ptr && !is_strong)\n\t\t\treturn 0;\n\n\t\t/* typedef is always a named definition */\n\t\terr = btf_dump_add_emit_queue_id(d, id);\n\t\tif (err)\n\t\t\treturn err;\n\n\t\td->type_states[id].order_state = ORDERED;\n\t\treturn 1;\n\t}\n\tcase BTF_KIND_VOLATILE:\n\tcase BTF_KIND_CONST:\n\tcase BTF_KIND_RESTRICT:\n\tcase BTF_KIND_TYPE_TAG:\n\t\treturn btf_dump_order_type(d, t->type, through_ptr);\n\n\tcase BTF_KIND_FUNC_PROTO: {\n\t\tconst struct btf_param *p = btf_params(t);\n\t\tbool is_strong;\n\n\t\terr = btf_dump_order_type(d, t->type, through_ptr);\n\t\tif (err < 0)\n\t\t\treturn err;\n\t\tis_strong = err > 0;\n\n\t\tvlen = btf_vlen(t);\n\t\tfor (i = 0; i < vlen; i++, p++) {\n\t\t\terr = btf_dump_order_type(d, p->type, through_ptr);\n\t\t\tif (err < 0)\n\t\t\t\treturn err;\n\t\t\tif (err > 0)\n\t\t\t\tis_strong = true;\n\t\t}\n\t\treturn is_strong;\n\t}\n\tcase BTF_KIND_FUNC:\n\tcase BTF_KIND_VAR:\n\tcase BTF_KIND_DATASEC:\n\tcase BTF_KIND_DECL_TAG:\n\t\td->type_states[id].order_state = ORDERED;\n\t\treturn 0;\n\n\tdefault:\n\t\treturn -EINVAL;\n\t}\n}", "static void pr_ibs_op_data3(union ibs_op_data3 reg)\n{\n\tchar l2_miss_str[sizeof(\" L2Miss _\")] = \"\";\n\tchar op_mem_width_str[sizeof(\" OpMemWidth _____ 
bytes\")] = \"\";\n\tchar op_dc_miss_open_mem_reqs_str[sizeof(\" OpDcMissOpenMemReqs __\")] = \"\";": "static void pr_ibs_op_data3(union ibs_op_data3 reg)\n{\n\tchar l2_miss_str[sizeof(\" L2Miss _\")] = \"\";\n\tchar op_mem_width_str[sizeof(\" OpMemWidth _____ bytes\")] = \"\";\n\tchar op_dc_miss_open_mem_reqs_str[sizeof(\" OpDcMissOpenMemReqs __\")] = \"\";\n\n\t/*\n\t * Erratum #1293\n\t * Ignore L2Miss and OpDcMissOpenMemReqs (and opdata2) if DcMissNoMabAlloc or SwPf set\n\t */\n\tif (!(cpu_family == 0x19 && cpu_model < 0x10 && (reg.dc_miss_no_mab_alloc || reg.sw_pf))) {\n\t\tsnprintf(l2_miss_str, sizeof(l2_miss_str), \" L2Miss %d\", reg.l2_miss);\n\t\tsnprintf(op_dc_miss_open_mem_reqs_str, sizeof(op_dc_miss_open_mem_reqs_str),\n\t\t\t \" OpDcMissOpenMemReqs %2d\", reg.op_dc_miss_open_mem_reqs);\n\t}\n\n\tif (reg.op_mem_width)\n\t\tsnprintf(op_mem_width_str, sizeof(op_mem_width_str),\n\t\t\t \" OpMemWidth %2d bytes\", 1 << (reg.op_mem_width - 1));\n\n\tprintf(\"ibs_op_data3:\\t%016llx LdOp %d StOp %d DcL1TlbMiss %d DcL2TlbMiss %d \"\n\t\t\"DcL1TlbHit2M %d DcL1TlbHit1G %d DcL2TlbHit2M %d DcMiss %d DcMisAcc %d \"\n\t\t\"DcWcMemAcc %d DcUcMemAcc %d DcLockedOp %d DcMissNoMabAlloc %d DcLinAddrValid %d \"\n\t\t\"DcPhyAddrValid %d DcL2TlbHit1G %d%s SwPf %d%s%s DcMissLat %5d TlbRefillLat %5d\\n\",\n\t\treg.val, reg.ld_op, reg.st_op, reg.dc_l1tlb_miss, reg.dc_l2tlb_miss,\n\t\treg.dc_l1tlb_hit_2m, reg.dc_l1tlb_hit_1g, reg.dc_l2tlb_hit_2m, reg.dc_miss,\n\t\treg.dc_mis_acc, reg.dc_wc_mem_acc, reg.dc_uc_mem_acc, reg.dc_locked_op,\n\t\treg.dc_miss_no_mab_alloc, reg.dc_lin_addr_valid, reg.dc_phy_addr_valid,\n\t\treg.dc_l2_tlb_hit_1g, l2_miss_str, reg.sw_pf, op_mem_width_str,\n\t\top_dc_miss_open_mem_reqs_str, reg.dc_miss_lat, reg.tlb_refill_lat);\n}", "static int _BlockWrite(struct adapter *padapter, void *buffer, u32 buffSize)\n{\n\tint ret = _SUCCESS;\n\n\tu32 blockSize_p1 = 4; /* (Default) Phase #1 : PCI muse use 4-byte write to download FW */": "static int 
_BlockWrite(struct adapter *padapter, void *buffer, u32 buffSize)\n{\n\tint ret = _SUCCESS;\n\n\tu32 blockSize_p1 = 4; /* (Default) Phase #1 : PCI muse use 4-byte write to download FW */\n\tu32 blockSize_p2 = 8; /* Phase #2 : Use 8-byte, if Phase#1 use big size to write FW. */\n\tu32 blockSize_p3 = 1; /* Phase #3 : Use 1-byte, the remnant of FW image. */\n\tu32 blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;\n\tu32 remainSize_p1 = 0, remainSize_p2 = 0;\n\tu8 *bufferPtr = buffer;\n\tu32 i = 0, offset = 0;\n\n/* \tprintk(\"====>%s %d\\n\", __func__, __LINE__); */\n\n\t/* 3 Phase #1 */\n\tblockCount_p1 = buffSize / blockSize_p1;\n\tremainSize_p1 = buffSize % blockSize_p1;\n\n\tfor (i = 0; i < blockCount_p1; i++) {\n\t\tret = rtw_write32(padapter, (FW_8723B_START_ADDRESS + i * blockSize_p1), *((u32 *)(bufferPtr + i * blockSize_p1)));\n\t\tif (ret == _FAIL) {\n\t\t\tprintk(\"====>%s %d i:%d\\n\", __func__, __LINE__, i);\n\t\t\tgoto exit;\n\t\t}\n\t}\n\n\t/* 3 Phase #2 */\n\tif (remainSize_p1) {\n\t\toffset = blockCount_p1 * blockSize_p1;\n\n\t\tblockCount_p2 = remainSize_p1/blockSize_p2;\n\t\tremainSize_p2 = remainSize_p1%blockSize_p2;\n\t}\n\n\t/* 3 Phase #3 */\n\tif (remainSize_p2) {\n\t\toffset = (blockCount_p1 * blockSize_p1) + (blockCount_p2 * blockSize_p2);\n\n\t\tblockCount_p3 = remainSize_p2 / blockSize_p3;\n\n\t\tfor (i = 0; i < blockCount_p3; i++) {\n\t\t\tret = rtw_write8(padapter, (FW_8723B_START_ADDRESS + offset + i), *(bufferPtr + offset + i));\n\n\t\t\tif (ret == _FAIL) {\n\t\t\t\tprintk(\"====>%s %d i:%d\\n\", __func__, __LINE__, i);\n\t\t\t\tgoto exit;\n\t\t\t}\n\t\t}\n\t}\nexit:\n\treturn ret;\n}", "static void amp_voyetra(struct snd_cs46xx *chip, int change)\n{\n\t/* Manage the EAPD bit on the Crystal 4297 \n\t and the Analog AD1885 */\n\t ": "static void amp_voyetra(struct snd_cs46xx *chip, int change)\n{\n\t/* Manage the EAPD bit on the Crystal 4297 \n\t and the Analog AD1885 */\n\t \n#ifdef CONFIG_SND_CS46XX_NEW_DSP\n\tint old = 
chip->amplifier;\n#endif\n\tint oval, val;\n\t\n\tchip->amplifier += change;\n\toval = snd_cs46xx_codec_read(chip, AC97_POWERDOWN,\n\t\t\t\t CS46XX_PRIMARY_CODEC_INDEX);\n\tval = oval;\n\tif (chip->amplifier) {\n\t\t/* Turn the EAPD amp on */\n\t\tval |= 0x8000;\n\t} else {\n\t\t/* Turn the EAPD amp off */\n\t\tval &= ~0x8000;\n\t}\n\tif (val != oval) {\n\t\tsnd_cs46xx_codec_write(chip, AC97_POWERDOWN, val,\n\t\t\t\t CS46XX_PRIMARY_CODEC_INDEX);\n\t\tif (chip->eapd_switch)\n\t\t\tsnd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,\n\t\t\t\t &chip->eapd_switch->id);\n\t}\n\n#ifdef CONFIG_SND_CS46XX_NEW_DSP\n\tif (chip->amplifier && !old) {\n\t\tvoyetra_setup_eapd_slot(chip);\n\t}\n#endif\n}", "static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info)\n{\n\tstruct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask;\n\tu32 value, ul_intr_enable, dl_intr_enable;\n\tint ret;": "static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info)\n{\n\tstruct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask;\n\tu32 value, ul_intr_enable, dl_intr_enable;\n\tint ret;\n\n\tul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK;\n\tisr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;\n\tiowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);\n\n\t/* Set interrupt enable mask */\n\tiowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);\n\tiowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);\n\n\t/* Check mask status */\n\tret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,\n\t\t\t\t\t value, (value & ul_intr_enable) != ul_intr_enable, 0,\n\t\t\t\t\t DPMAIF_CHECK_INIT_TIMEOUT_US);\n\tif (ret)\n\t\treturn ret;\n\n\tdl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR;\n\tisr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable;\n\tul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN |\n\t\t DPMAIF_DL_INT_DLQ1_QDONE | 
DPMAIF_DL_INT_DLQ1_PITCNT_LEN;\n\tisr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;\n\tiowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);\n\n\t/* Set DL ISR PD enable mask */\n\tiowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);\n\tret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0,\n\t\t\t\t\t value, (value & ul_intr_enable) != ul_intr_enable, 0,\n\t\t\t\t\t DPMAIF_CHECK_INIT_TIMEOUT_US);\n\tif (ret)\n\t\treturn ret;\n\n\tisr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY;\n\tiowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);\n\tiowrite32(isr_en_msk->ap_udl_ip_busy_en_msk,\n\t\t hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK);\n\tvalue = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);\n\tvalue |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1;\n\tiowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);\n\tiowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK);\n\n\treturn 0;\n}", "static int hwlat_mode_open(struct inode *inode, struct file *file)\n{\n\treturn seq_open(file, &thread_mode_seq_ops);\n};\n": "static int hwlat_mode_open(struct inode *inode, struct file *file)\n{\n\treturn seq_open(file, &thread_mode_seq_ops);\n};\n\nstatic void hwlat_tracer_start(struct trace_array *tr);\nstatic void hwlat_tracer_stop(struct trace_array *tr);\n\n/**\n * hwlat_mode_write - Write function for \"mode\" entry\n * @filp: The active open file structure\n * @ubuf: The user buffer that contains the value to write\n * @cnt: The maximum number of bytes to write to \"file\"\n * @ppos: The current position in @file\n *\n * This function provides a write implementation for the \"mode\" interface\n * to the hardware latency detector. hwlatd has different operation modes.\n * The \"none\" sets the allowed cpumask for a single hwlatd thread at the\n * startup and lets the scheduler handle the migration. 
The default mode is\n * the \"round-robin\" one, in which a single hwlatd thread runs, migrating\n * among the allowed CPUs in a round-robin fashion. The \"per-cpu\" mode\n * creates one hwlatd thread per allowed CPU.\n */\nstatic ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf,\n\t\t\t\t size_t cnt, loff_t *ppos)\n{\n\tstruct trace_array *tr = hwlat_trace;\n\tconst char *mode;\n\tchar buf[64];\n\tint ret, i;\n\n\tif (cnt >= sizeof(buf))\n\t\treturn -EINVAL;\n\n\tif (copy_from_user(buf, ubuf, cnt))\n\t\treturn -EFAULT;\n\n\tbuf[cnt] = 0;\n\n\tmode = strstrip(buf);\n\n\tret = -EINVAL;\n\n\t/*\n\t * trace_types_lock is taken to avoid concurrency on start/stop\n\t * and hwlat_busy.\n\t */\n\tmutex_lock(&trace_types_lock);\n\tif (hwlat_busy)\n\t\thwlat_tracer_stop(tr);\n\n\tmutex_lock(&hwlat_data.lock);\n\n\tfor (i = 0; i < MODE_MAX; i++) {\n\t\tif (strcmp(mode, thread_mode_str[i]) == 0) {\n\t\t\thwlat_data.thread_mode = i;\n\t\t\tret = cnt;\n\t\t}\n\t}\n\n\tmutex_unlock(&hwlat_data.lock);\n\n\tif (hwlat_busy)\n\t\thwlat_tracer_start(tr);\n\tmutex_unlock(&trace_types_lock);\n\n\t*ppos += cnt;\n\n\n\n\treturn ret;\n}", "static void ae5_exit_chip(struct hda_codec *codec)\n{\n\tchipio_set_stream_control(codec, 0x03, 0);\n\tchipio_set_stream_control(codec, 0x04, 0);\n": "static void ae5_exit_chip(struct hda_codec *codec)\n{\n\tchipio_set_stream_control(codec, 0x03, 0);\n\tchipio_set_stream_control(codec, 0x04, 0);\n\n\tca0113_mmio_command_set(codec, 0x30, 0x32, 0x3f);\n\tca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);\n\tca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);\n\tca0113_mmio_command_set(codec, 0x30, 0x30, 0x00);\n\tca0113_mmio_command_set(codec, 0x30, 0x2b, 0x00);\n\tca0113_mmio_command_set(codec, 0x30, 0x2d, 0x00);\n\tca0113_mmio_gpio_set(codec, 0, false);\n\tca0113_mmio_gpio_set(codec, 1, false);\n\n\tsnd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);\n\tsnd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);\n\n\tchipio_set_control_param(codec, 
CONTROL_PARAM_ASI, 0);\n\n\tchipio_set_stream_control(codec, 0x18, 0);\n\tchipio_set_stream_control(codec, 0x0c, 0);\n\n\tsnd_hda_codec_write(codec, 0x01, 0, 0x724, 0x83);\n}", "static void config_probs(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)\n{\n\tstruct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;\n\tstruct hantro_aux_buf *misc = &vp9_ctx->misc;\n\tstruct hantro_g2_all_probs *all_probs = misc->cpu;": "static void config_probs(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)\n{\n\tstruct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;\n\tstruct hantro_aux_buf *misc = &vp9_ctx->misc;\n\tstruct hantro_g2_all_probs *all_probs = misc->cpu;\n\tstruct hantro_g2_probs *adaptive;\n\tstruct hantro_g2_mv_probs *mv;\n\tconst struct v4l2_vp9_segmentation *seg = &dec_params->seg;\n\tconst struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;\n\tint i, j, k, l, m;\n\n\tfor (i = 0; i < ARRAY_SIZE(all_probs->kf_y_mode_prob); ++i)\n\t\tfor (j = 0; j < ARRAY_SIZE(all_probs->kf_y_mode_prob[0]); ++j) {\n\t\t\tmemcpy(all_probs->kf_y_mode_prob[i][j],\n\t\t\t v4l2_vp9_kf_y_mode_prob[i][j],\n\t\t\t ARRAY_SIZE(all_probs->kf_y_mode_prob[i][j]));\n\n\t\t\tall_probs->kf_y_mode_prob_tail[i][j][0] =\n\t\t\t\tv4l2_vp9_kf_y_mode_prob[i][j][8];\n\t\t}\n\n\tmemcpy(all_probs->mb_segment_tree_probs, seg->tree_probs,\n\t sizeof(all_probs->mb_segment_tree_probs));\n\n\tmemcpy(all_probs->segment_pred_probs, seg->pred_probs,\n\t sizeof(all_probs->segment_pred_probs));\n\n\tfor (i = 0; i < ARRAY_SIZE(all_probs->kf_uv_mode_prob); ++i) {\n\t\tmemcpy(all_probs->kf_uv_mode_prob[i], v4l2_vp9_kf_uv_mode_prob[i],\n\t\t ARRAY_SIZE(all_probs->kf_uv_mode_prob[i]));\n\n\t\tall_probs->kf_uv_mode_prob_tail[i][0] = v4l2_vp9_kf_uv_mode_prob[i][8];\n\t}\n\n\tadaptive = &all_probs->probs;\n\n\tfor (i = 0; i < ARRAY_SIZE(adaptive->inter_mode); ++i) {\n\t\tmemcpy(adaptive->inter_mode[i], probs->inter_mode[i],\n\t\t 
ARRAY_SIZE(probs->inter_mode[i]));\n\n\t\tadaptive->inter_mode[i][3] = 0;\n\t}\n\n\tmemcpy(adaptive->is_inter, probs->is_inter, sizeof(adaptive->is_inter));\n\n\tfor (i = 0; i < ARRAY_SIZE(adaptive->uv_mode); ++i) {\n\t\tmemcpy(adaptive->uv_mode[i], probs->uv_mode[i],\n\t\t sizeof(adaptive->uv_mode[i]));\n\t\tadaptive->uv_mode_tail[i][0] = probs->uv_mode[i][8];\n\t}\n\n\tmemcpy(adaptive->tx8, probs->tx8, sizeof(adaptive->tx8));\n\tmemcpy(adaptive->tx16, probs->tx16, sizeof(adaptive->tx16));\n\tmemcpy(adaptive->tx32, probs->tx32, sizeof(adaptive->tx32));\n\n\tfor (i = 0; i < ARRAY_SIZE(adaptive->y_mode); ++i) {\n\t\tmemcpy(adaptive->y_mode[i], probs->y_mode[i],\n\t\t ARRAY_SIZE(adaptive->y_mode[i]));\n\n\t\tadaptive->y_mode_tail[i][0] = probs->y_mode[i][8];\n\t}\n\n\tfor (i = 0; i < ARRAY_SIZE(adaptive->partition[0]); ++i) {\n\t\tmemcpy(adaptive->partition[0][i], v4l2_vp9_kf_partition_probs[i],\n\t\t sizeof(v4l2_vp9_kf_partition_probs[i]));\n\n\t\tadaptive->partition[0][i][3] = 0;\n\t}\n\n\tfor (i = 0; i < ARRAY_SIZE(adaptive->partition[1]); ++i) {\n\t\tmemcpy(adaptive->partition[1][i], probs->partition[i],\n\t\t sizeof(probs->partition[i]));\n\n\t\tadaptive->partition[1][i][3] = 0;\n\t}\n\n\tmemcpy(adaptive->interp_filter, probs->interp_filter,\n\t sizeof(adaptive->interp_filter));\n\n\tmemcpy(adaptive->comp_mode, probs->comp_mode, sizeof(adaptive->comp_mode));\n\n\tmemcpy(adaptive->skip, probs->skip, sizeof(adaptive->skip));\n\n\tmv = &adaptive->mv;\n\n\tmemcpy(mv->joint, probs->mv.joint, sizeof(mv->joint));\n\tmemcpy(mv->sign, probs->mv.sign, sizeof(mv->sign));\n\tmemcpy(mv->class0_bit, probs->mv.class0_bit, sizeof(mv->class0_bit));\n\tmemcpy(mv->fr, probs->mv.fr, sizeof(mv->fr));\n\tmemcpy(mv->class0_hp, probs->mv.class0_hp, sizeof(mv->class0_hp));\n\tmemcpy(mv->hp, probs->mv.hp, sizeof(mv->hp));\n\tmemcpy(mv->classes, probs->mv.classes, sizeof(mv->classes));\n\tmemcpy(mv->class0_fr, probs->mv.class0_fr, sizeof(mv->class0_fr));\n\tmemcpy(mv->bits, 
probs->mv.bits, sizeof(mv->bits));\n\n\tmemcpy(adaptive->single_ref, probs->single_ref, sizeof(adaptive->single_ref));\n\n\tmemcpy(adaptive->comp_ref, probs->comp_ref, sizeof(adaptive->comp_ref));\n\n\tfor (i = 0; i < ARRAY_SIZE(adaptive->coef); ++i)\n\t\tfor (j = 0; j < ARRAY_SIZE(adaptive->coef[0]); ++j)\n\t\t\tfor (k = 0; k < ARRAY_SIZE(adaptive->coef[0][0]); ++k)\n\t\t\t\tfor (l = 0; l < ARRAY_SIZE(adaptive->coef[0][0][0]); ++l)\n\t\t\t\t\tINNER_LOOP;\n\n\thantro_write_addr(ctx->dev, G2_VP9_PROBS_ADDR, misc->dma);\n}", "static int dib9000_fw_boot(struct dib9000_state *state, const u8 * codeA, u32 lenA, const u8 * codeB, u32 lenB)\n{\n\t/* Reconfig pool mac ram */\n\tdib9000_write_word(state, 1225, 0x02);\t/* A: 8k C, 4 k D - B: 32k C 6 k D - IRAM 96k */\n\tdib9000_write_word(state, 1226, 0x05);": "static int dib9000_fw_boot(struct dib9000_state *state, const u8 * codeA, u32 lenA, const u8 * codeB, u32 lenB)\n{\n\t/* Reconfig pool mac ram */\n\tdib9000_write_word(state, 1225, 0x02);\t/* A: 8k C, 4 k D - B: 32k C 6 k D - IRAM 96k */\n\tdib9000_write_word(state, 1226, 0x05);\n\n\t/* Toggles IP crypto to Host APB interface. */\n\tdib9000_write_word(state, 1542, 1);\n\n\t/* Set jump and no jump in the dma box */\n\tdib9000_write_word(state, 1074, 0);\n\tdib9000_write_word(state, 1075, 0);\n\n\t/* Set MAC as APB Master. 
*/\n\tdib9000_write_word(state, 1237, 0);\n\n\t/* Reset the RISCs */\n\tif (codeA != NULL)\n\t\tdib9000_write_word(state, 1024, 2);\n\telse\n\t\tdib9000_write_word(state, 1024, 15);\n\tif (codeB != NULL)\n\t\tdib9000_write_word(state, 1040, 2);\n\n\tif (codeA != NULL)\n\t\tdib9000_firmware_download(state, 0, 0x1234, codeA, lenA);\n\tif (codeB != NULL)\n\t\tdib9000_firmware_download(state, 1, 0x1234, codeB, lenB);\n\n\t/* Run the RISCs */\n\tif (codeA != NULL)\n\t\tdib9000_write_word(state, 1024, 0);\n\tif (codeB != NULL)\n\t\tdib9000_write_word(state, 1040, 0);\n\n\tif (codeA != NULL)\n\t\tif (dib9000_mbx_host_init(state, 0) != 0)\n\t\t\treturn -EIO;\n\tif (codeB != NULL)\n\t\tif (dib9000_mbx_host_init(state, 1) != 0)\n\t\t\treturn -EIO;\n\n\tmsleep(100);\n\tstate->platform.risc.fw_is_running = 1;\n\n\tif (dib9000_risc_check_version(state) != 0)\n\t\treturn -EINVAL;\n\n\tstate->platform.risc.memcmd = 0xff;\n\treturn 0;\n}", "static void test_sockmap(unsigned int tasks, void *data)\n{\n\tstruct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;\n\tint map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;\n\tint ports[] = {50200, 50201, 50202, 50204};": "static void test_sockmap(unsigned int tasks, void *data)\n{\n\tstruct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;\n\tint map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;\n\tint ports[] = {50200, 50201, 50202, 50204};\n\tint err, i, fd, udp, sfd[6] = {0xdeadbeef};\n\tu8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};\n\tint parse_prog, verdict_prog, msg_prog;\n\tstruct sockaddr_in addr;\n\tint one = 1, s, sc, rc;\n\tstruct bpf_object *obj;\n\tstruct timeval to;\n\t__u32 key, value;\n\tpid_t pid[tasks];\n\tfd_set w;\n\n\t/* Create some sockets to use with sockmap */\n\tfor (i = 0; i < 2; i++) {\n\t\tsfd[i] = socket(AF_INET, SOCK_STREAM, 0);\n\t\tif (sfd[i] < 0)\n\t\t\tgoto out;\n\t\terr = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR,\n\t\t\t\t (char *)&one, 
sizeof(one));\n\t\tif (err) {\n\t\t\tprintf(\"failed to setsockopt\\n\");\n\t\t\tgoto out;\n\t\t}\n\t\terr = ioctl(sfd[i], FIONBIO, (char *)&one);\n\t\tif (err < 0) {\n\t\t\tprintf(\"failed to ioctl\\n\");\n\t\t\tgoto out;\n\t\t}\n\t\tmemset(&addr, 0, sizeof(struct sockaddr_in));\n\t\taddr.sin_family = AF_INET;\n\t\taddr.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n\t\taddr.sin_port = htons(ports[i]);\n\t\terr = bind(sfd[i], (struct sockaddr *)&addr, sizeof(addr));\n\t\tif (err < 0) {\n\t\t\tprintf(\"failed to bind: err %i: %i:%i\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out;\n\t\t}\n\t\terr = listen(sfd[i], 32);\n\t\tif (err < 0) {\n\t\t\tprintf(\"failed to listen\\n\");\n\t\t\tgoto out;\n\t\t}\n\t}\n\n\tfor (i = 2; i < 4; i++) {\n\t\tsfd[i] = socket(AF_INET, SOCK_STREAM, 0);\n\t\tif (sfd[i] < 0)\n\t\t\tgoto out;\n\t\terr = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR,\n\t\t\t\t (char *)&one, sizeof(one));\n\t\tif (err) {\n\t\t\tprintf(\"set sock opt\\n\");\n\t\t\tgoto out;\n\t\t}\n\t\tmemset(&addr, 0, sizeof(struct sockaddr_in));\n\t\taddr.sin_family = AF_INET;\n\t\taddr.sin_addr.s_addr = inet_addr(\"127.0.0.1\");\n\t\taddr.sin_port = htons(ports[i - 2]);\n\t\terr = connect(sfd[i], (struct sockaddr *)&addr, sizeof(addr));\n\t\tif (err) {\n\t\t\tprintf(\"failed to connect\\n\");\n\t\t\tgoto out;\n\t\t}\n\t}\n\n\n\tfor (i = 4; i < 6; i++) {\n\t\tsfd[i] = accept(sfd[i - 4], NULL, NULL);\n\t\tif (sfd[i] < 0) {\n\t\t\tprintf(\"accept failed\\n\");\n\t\t\tgoto out;\n\t\t}\n\t}\n\n\t/* Test sockmap with connected sockets */\n\tfd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL,\n\t\t\t sizeof(key), sizeof(value),\n\t\t\t 6, NULL);\n\tif (fd < 0) {\n\t\tif (!libbpf_probe_bpf_map_type(BPF_MAP_TYPE_SOCKMAP, NULL)) {\n\t\t\tprintf(\"%s SKIP (unsupported map type BPF_MAP_TYPE_SOCKMAP)\\n\",\n\t\t\t __func__);\n\t\t\tskips++;\n\t\t\tfor (i = 0; i < 6; i++)\n\t\t\t\tclose(sfd[i]);\n\t\t\treturn;\n\t\t}\n\n\t\tprintf(\"Failed to create sockmap %i\\n\", fd);\n\t\tgoto 
out_sockmap;\n\t}\n\n\t/* Test update with unsupported UDP socket */\n\tudp = socket(AF_INET, SOCK_DGRAM, 0);\n\ti = 0;\n\terr = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);\n\tif (err) {\n\t\tprintf(\"Failed socket update SOCK_DGRAM '%i:%i'\\n\",\n\t\t i, udp);\n\t\tgoto out_sockmap;\n\t}\n\n\t/* Test update without programs */\n\tfor (i = 0; i < 6; i++) {\n\t\terr = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);\n\t\tif (err) {\n\t\t\tprintf(\"Failed noprog update sockmap '%i:%i'\\n\",\n\t\t\t i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t}\n\n\t/* Test attaching/detaching bad fds */\n\terr = bpf_prog_attach(-1, fd, BPF_SK_SKB_STREAM_PARSER, 0);\n\tif (!err) {\n\t\tprintf(\"Failed invalid parser prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(-1, fd, BPF_SK_SKB_STREAM_VERDICT, 0);\n\tif (!err) {\n\t\tprintf(\"Failed invalid verdict prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(-1, fd, BPF_SK_MSG_VERDICT, 0);\n\tif (!err) {\n\t\tprintf(\"Failed invalid msg verdict prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(-1, fd, __MAX_BPF_ATTACH_TYPE, 0);\n\tif (!err) {\n\t\tprintf(\"Failed unknown prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);\n\tif (!err) {\n\t\tprintf(\"Failed empty parser prog detach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);\n\tif (!err) {\n\t\tprintf(\"Failed empty verdict prog detach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);\n\tif (!err) {\n\t\tprintf(\"Failed empty msg verdict prog detach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_detach(fd, __MAX_BPF_ATTACH_TYPE);\n\tif (!err) {\n\t\tprintf(\"Detach invalid prog successful\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\t/* Load SK_SKB program and Attach */\n\terr = bpf_prog_test_load(SOCKMAP_PARSE_PROG,\n\t\t\t BPF_PROG_TYPE_SK_SKB, &obj, &parse_prog);\n\tif (err) 
{\n\t\tprintf(\"Failed to load SK_SKB parse prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_test_load(SOCKMAP_TCP_MSG_PROG,\n\t\t\t BPF_PROG_TYPE_SK_MSG, &obj, &msg_prog);\n\tif (err) {\n\t\tprintf(\"Failed to load SK_SKB msg prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_test_load(SOCKMAP_VERDICT_PROG,\n\t\t\t BPF_PROG_TYPE_SK_SKB, &obj, &verdict_prog);\n\tif (err) {\n\t\tprintf(\"Failed to load SK_SKB verdict prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tbpf_map_rx = bpf_object__find_map_by_name(obj, \"sock_map_rx\");\n\tif (!bpf_map_rx) {\n\t\tprintf(\"Failed to load map rx from verdict prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tmap_fd_rx = bpf_map__fd(bpf_map_rx);\n\tif (map_fd_rx < 0) {\n\t\tprintf(\"Failed to get map rx fd\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tbpf_map_tx = bpf_object__find_map_by_name(obj, \"sock_map_tx\");\n\tif (!bpf_map_tx) {\n\t\tprintf(\"Failed to load map tx from verdict prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tmap_fd_tx = bpf_map__fd(bpf_map_tx);\n\tif (map_fd_tx < 0) {\n\t\tprintf(\"Failed to get map tx fd\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tbpf_map_msg = bpf_object__find_map_by_name(obj, \"sock_map_msg\");\n\tif (!bpf_map_msg) {\n\t\tprintf(\"Failed to load map msg from msg_verdict prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tmap_fd_msg = bpf_map__fd(bpf_map_msg);\n\tif (map_fd_msg < 0) {\n\t\tprintf(\"Failed to get map msg fd\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tbpf_map_break = bpf_object__find_map_by_name(obj, \"sock_map_break\");\n\tif (!bpf_map_break) {\n\t\tprintf(\"Failed to load map tx from verdict prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tmap_fd_break = bpf_map__fd(bpf_map_break);\n\tif (map_fd_break < 0) {\n\t\tprintf(\"Failed to get map tx fd\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(parse_prog, map_fd_break,\n\t\t\t BPF_SK_SKB_STREAM_PARSER, 0);\n\tif (!err) {\n\t\tprintf(\"Allowed attaching SK_SKB program to invalid map\\n\");\n\t\tgoto 
out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(parse_prog, map_fd_rx,\n\t\t BPF_SK_SKB_STREAM_PARSER, 0);\n\tif (err) {\n\t\tprintf(\"Failed stream parser bpf prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(verdict_prog, map_fd_rx,\n\t\t\t BPF_SK_SKB_STREAM_VERDICT, 0);\n\tif (err) {\n\t\tprintf(\"Failed stream verdict bpf prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(msg_prog, map_fd_msg, BPF_SK_MSG_VERDICT, 0);\n\tif (err) {\n\t\tprintf(\"Failed msg verdict bpf prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_attach(verdict_prog, map_fd_rx,\n\t\t\t __MAX_BPF_ATTACH_TYPE, 0);\n\tif (!err) {\n\t\tprintf(\"Attached unknown bpf prog\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\t/* Test map update elem afterwards fd lives in fd and map_fd */\n\tfor (i = 2; i < 6; i++) {\n\t\terr = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);\n\t\tif (err) {\n\t\t\tprintf(\"Failed map_fd_rx update sockmap %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t\terr = bpf_map_update_elem(map_fd_tx, &i, &sfd[i], BPF_ANY);\n\t\tif (err) {\n\t\t\tprintf(\"Failed map_fd_tx update sockmap %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t}\n\n\t/* Test map delete elem and remove send/recv sockets */\n\tfor (i = 2; i < 4; i++) {\n\t\terr = bpf_map_delete_elem(map_fd_rx, &i);\n\t\tif (err) {\n\t\t\tprintf(\"Failed delete sockmap rx %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t\terr = bpf_map_delete_elem(map_fd_tx, &i);\n\t\tif (err) {\n\t\t\tprintf(\"Failed delete sockmap tx %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t}\n\n\t/* Put sfd[2] (sending fd below) into msg map to test sendmsg bpf */\n\ti = 0;\n\terr = bpf_map_update_elem(map_fd_msg, &i, &sfd[2], BPF_ANY);\n\tif (err) {\n\t\tprintf(\"Failed map_fd_msg update sockmap %i\\n\", err);\n\t\tgoto out_sockmap;\n\t}\n\n\t/* Test map send/recv */\n\tfor (i = 
0; i < 2; i++) {\n\t\tbuf[0] = i;\n\t\tbuf[1] = 0x5;\n\t\tsc = send(sfd[2], buf, 20, 0);\n\t\tif (sc < 0) {\n\t\t\tprintf(\"Failed sockmap send\\n\");\n\t\t\tgoto out_sockmap;\n\t\t}\n\n\t\tFD_ZERO(&w);\n\t\tFD_SET(sfd[3], &w);\n\t\tto.tv_sec = 30;\n\t\tto.tv_usec = 0;\n\t\ts = select(sfd[3] + 1, &w, NULL, NULL, &to);\n\t\tif (s == -1) {\n\t\t\tperror(\"Failed sockmap select()\");\n\t\t\tgoto out_sockmap;\n\t\t} else if (!s) {\n\t\t\tprintf(\"Failed sockmap unexpected timeout\\n\");\n\t\t\tgoto out_sockmap;\n\t\t}\n\n\t\tif (!FD_ISSET(sfd[3], &w)) {\n\t\t\tprintf(\"Failed sockmap select/recv\\n\");\n\t\t\tgoto out_sockmap;\n\t\t}\n\n\t\trc = recv(sfd[3], buf, sizeof(buf), 0);\n\t\tif (rc < 0) {\n\t\t\tprintf(\"Failed sockmap recv\\n\");\n\t\t\tgoto out_sockmap;\n\t\t}\n\t}\n\n\t/* Negative null entry lookup from datapath should be dropped */\n\tbuf[0] = 1;\n\tbuf[1] = 12;\n\tsc = send(sfd[2], buf, 20, 0);\n\tif (sc < 0) {\n\t\tprintf(\"Failed sockmap send\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\t/* Push fd into same slot */\n\ti = 2;\n\terr = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST);\n\tif (!err) {\n\t\tprintf(\"Failed allowed sockmap dup slot BPF_NOEXIST\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);\n\tif (err) {\n\t\tprintf(\"Failed sockmap update new slot BPF_ANY\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST);\n\tif (err) {\n\t\tprintf(\"Failed sockmap update new slot BPF_EXIST\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\t/* Delete the elems without programs */\n\tfor (i = 2; i < 6; i++) {\n\t\terr = bpf_map_delete_elem(fd, &i);\n\t\tif (err) {\n\t\t\tprintf(\"Failed delete sockmap %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t}\n\t}\n\n\t/* Test having multiple maps open and set with programs on same fds */\n\terr = bpf_prog_attach(parse_prog, fd,\n\t\t\t BPF_SK_SKB_STREAM_PARSER, 0);\n\tif (err) {\n\t\tprintf(\"Failed fd bpf parse prog attach\\n\");\n\t\tgoto 
out_sockmap;\n\t}\n\terr = bpf_prog_attach(verdict_prog, fd,\n\t\t\t BPF_SK_SKB_STREAM_VERDICT, 0);\n\tif (err) {\n\t\tprintf(\"Failed fd bpf verdict prog attach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\tfor (i = 4; i < 6; i++) {\n\t\terr = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);\n\t\tif (!err) {\n\t\t\tprintf(\"Failed allowed duplicate programs in update ANY sockmap %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t\terr = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST);\n\t\tif (!err) {\n\t\t\tprintf(\"Failed allowed duplicate program in update NOEXIST sockmap %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t\terr = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST);\n\t\tif (!err) {\n\t\t\tprintf(\"Failed allowed duplicate program in update EXIST sockmap %i '%i:%i'\\n\",\n\t\t\t err, i, sfd[i]);\n\t\t\tgoto out_sockmap;\n\t\t}\n\t}\n\n\t/* Test tasks number of forked operations */\n\tfor (i = 0; i < tasks; i++) {\n\t\tpid[i] = fork();\n\t\tif (pid[i] == 0) {\n\t\t\tfor (i = 0; i < 6; i++) {\n\t\t\t\tbpf_map_delete_elem(map_fd_tx, &i);\n\t\t\t\tbpf_map_delete_elem(map_fd_rx, &i);\n\t\t\t\tbpf_map_update_elem(map_fd_tx, &i,\n\t\t\t\t\t\t &sfd[i], BPF_ANY);\n\t\t\t\tbpf_map_update_elem(map_fd_rx, &i,\n\t\t\t\t\t\t &sfd[i], BPF_ANY);\n\t\t\t}\n\t\t\texit(0);\n\t\t} else if (pid[i] == -1) {\n\t\t\tprintf(\"Couldn't spawn #%d process!\\n\", i);\n\t\t\texit(1);\n\t\t}\n\t}\n\n\tfor (i = 0; i < tasks; i++) {\n\t\tint status;\n\n\t\tassert(waitpid(pid[i], &status, 0) == pid[i]);\n\t\tassert(status == 0);\n\t}\n\n\terr = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);\n\tif (!err) {\n\t\tprintf(\"Detached an invalid prog type.\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);\n\tif (err) {\n\t\tprintf(\"Failed parser prog detach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\terr = bpf_prog_detach2(verdict_prog, map_fd_rx, 
BPF_SK_SKB_STREAM_VERDICT);\n\tif (err) {\n\t\tprintf(\"Failed parser prog detach\\n\");\n\t\tgoto out_sockmap;\n\t}\n\n\t/* Test map close sockets and empty maps */\n\tfor (i = 0; i < 6; i++) {\n\t\tbpf_map_delete_elem(map_fd_tx, &i);\n\t\tbpf_map_delete_elem(map_fd_rx, &i);\n\t\tclose(sfd[i]);\n\t}\n\tclose(fd);\n\tclose(map_fd_rx);\n\tbpf_object__close(obj);\n\treturn;\nout:\n\tfor (i = 0; i < 6; i++)\n\t\tclose(sfd[i]);\n\tprintf(\"Failed to create sockmap '%i:%s'!\\n\", i, strerror(errno));\n\texit(1);\nout_sockmap:\n\tfor (i = 0; i < 6; i++) {\n\t\tif (map_fd_tx)\n\t\t\tbpf_map_delete_elem(map_fd_tx, &i);\n\t\tif (map_fd_rx)\n\t\t\tbpf_map_delete_elem(map_fd_rx, &i);\n\t\tclose(sfd[i]);\n\t}\n\tclose(fd);\n\texit(1);\n}", "static int INIT gunzip(void)\n{\n uch flags;\n unsigned char magic[2]; /* magic header */\n char method;": "static int INIT gunzip(void)\n{\n uch flags;\n unsigned char magic[2]; /* magic header */\n char method;\n ulg orig_crc = 0; /* original crc */\n ulg orig_len = 0; /* original uncompressed length */\n int res;\n\n magic[0] = NEXTBYTE();\n magic[1] = NEXTBYTE();\n method = NEXTBYTE();\n\n if (magic[0] != 037 ||\n\t((magic[1] != 0213) && (magic[1] != 0236))) {\n\t error(\"bad gzip magic numbers\");\n\t return -1;\n }\n\n /* We only support method #8, DEFLATED */\n if (method != 8) {\n\t error(\"internal error, invalid method\");\n\t return -1;\n }\n\n flags = (uch)get_byte();\n if ((flags & ENCRYPTED) != 0) {\n\t error(\"Input is encrypted\");\n\t return -1;\n }\n if ((flags & CONTINUATION) != 0) {\n\t error(\"Multi part input\");\n\t return -1;\n }\n if ((flags & RESERVED) != 0) {\n\t error(\"Input has invalid flags\");\n\t return -1;\n }\n NEXTBYTE();\t/* Get timestamp */\n NEXTBYTE();\n NEXTBYTE();\n NEXTBYTE();\n\n (void)NEXTBYTE(); /* Ignore extra flags for the moment */\n (void)NEXTBYTE(); /* Ignore OS type for the moment */\n\n if ((flags & EXTRA_FIELD) != 0) {\n\t unsigned len = (unsigned)NEXTBYTE();\n\t len |= 
((unsigned)NEXTBYTE())<<8;\n\t while (len--) (void)NEXTBYTE();\n }\n\n /* Get original file name if it was truncated */\n if ((flags & ORIG_NAME) != 0) {\n\t /* Discard the old name */\n\t while (NEXTBYTE() != 0) /* null */ ;\n } \n\n /* Discard file comment if any */\n if ((flags & COMMENT) != 0) {\n\t while (NEXTBYTE() != 0) /* null */ ;\n }\n\n /* Decompress */\n if ((res = inflate())) {\n\t switch (res) {\n\t case 0:\n\t\t break;\n\t case 1:\n\t\t error(\"invalid compressed format (err=1)\");\n\t\t break;\n\t case 2:\n\t\t error(\"invalid compressed format (err=2)\");\n\t\t break;\n\t case 3:\n\t\t error(\"out of memory\");\n\t\t break;\n\t case 4:\n\t\t error(\"out of input data\");\n\t\t break;\n\t default:\n\t\t error(\"invalid compressed format (other)\");\n\t }\n\t return -1;\n }\n\t \n /* Get the crc and original length */\n /* crc32 (see algorithm.doc)\n * uncompressed input size modulo 2^32\n */\n orig_crc = (ulg) NEXTBYTE();\n orig_crc |= (ulg) NEXTBYTE() << 8;\n orig_crc |= (ulg) NEXTBYTE() << 16;\n orig_crc |= (ulg) NEXTBYTE() << 24;\n \n orig_len = (ulg) NEXTBYTE();\n orig_len |= (ulg) NEXTBYTE() << 8;\n orig_len |= (ulg) NEXTBYTE() << 16;\n orig_len |= (ulg) NEXTBYTE() << 24;\n \n /* Validate decompression */\n if (orig_crc != CRC_VALUE) {\n\t error(\"crc error\");\n\t return -1;\n }\n if (orig_len != bytes_out) {\n\t error(\"length error\");\n\t return -1;\n }\n return 0;\n\n underrun:\t\t\t/* NEXTBYTE() goto's here if needed */\n error(\"out of input data\");\n return -1;\n}", "static void gre_err(struct sk_buff *skb, u32 info)\n{\n\t/* All the routers (except for Linux) return only\n\t * 8 bytes of packet payload. It means, that precise relaying of\n\t * ICMP in the real Internet is absolutely infeasible.": "static void gre_err(struct sk_buff *skb, u32 info)\n{\n\t/* All the routers (except for Linux) return only\n\t * 8 bytes of packet payload. 
It means, that precise relaying of\n\t * ICMP in the real Internet is absolutely infeasible.\n\t *\n\t * Moreover, Cisco \"wise men\" put GRE key to the third word\n\t * in GRE header. It makes impossible maintaining even soft\n\t * state for keyed\n\t * GRE tunnels with enabled checksum. Tell them \"thank you\".\n\t *\n\t * Well, I wonder, rfc1812 was written by Cisco employee,\n\t * what the hell these idiots break standards established\n\t * by themselves???\n\t */\n\n\tconst struct iphdr *iph = (struct iphdr *)skb->data;\n\tconst int type = icmp_hdr(skb)->type;\n\tconst int code = icmp_hdr(skb)->code;\n\tstruct tnl_ptk_info tpi;\n\n\tif (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),\n\t\t\t iph->ihl * 4) < 0)\n\t\treturn;\n\n\tif (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {\n\t\tipv4_update_pmtu(skb, dev_net(skb->dev), info,\n\t\t\t\t skb->dev->ifindex, IPPROTO_GRE);\n\t\treturn;\n\t}\n\tif (type == ICMP_REDIRECT) {\n\t\tipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,\n\t\t\t IPPROTO_GRE);\n\t\treturn;\n\t}\n\n\tipgre_err(skb, info, &tpi);\n}", "static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx)\n{\n\tfd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;\n\tfd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;\n\tfd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST;": "static void ice_set_dflt_val_fd_desc(struct ice_fd_fltr_desc_ctx *fd_fltr_ctx)\n{\n\tfd_fltr_ctx->comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;\n\tfd_fltr_ctx->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;\n\tfd_fltr_ctx->fd_space = ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST;\n\tfd_fltr_ctx->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;\n\tfd_fltr_ctx->evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE;\n\tfd_fltr_ctx->toq = ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX;\n\tfd_fltr_ctx->toq_prio = ICE_FXD_FLTR_QW0_TO_Q_PRIO1;\n\tfd_fltr_ctx->dpu_recipe = ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT;\n\tfd_fltr_ctx->drop = 
ICE_FXD_FLTR_QW0_DROP_NO;\n\tfd_fltr_ctx->flex_prio = ICE_FXD_FLTR_QW0_FLEX_PRI_NONE;\n\tfd_fltr_ctx->flex_mdid = ICE_FXD_FLTR_QW0_FLEX_MDID0;\n\tfd_fltr_ctx->flex_val = ICE_FXD_FLTR_QW0_FLEX_VAL0;\n\tfd_fltr_ctx->dtype = ICE_TX_DESC_DTYPE_FLTR_PROG;\n\tfd_fltr_ctx->desc_prof_prio = ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO;\n\tfd_fltr_ctx->desc_prof = ICE_FXD_FLTR_QW1_PROF_ZERO;\n\tfd_fltr_ctx->swap = ICE_FXD_FLTR_QW1_SWAP_SET;\n\tfd_fltr_ctx->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;\n\tfd_fltr_ctx->fdid_mdid = ICE_FXD_FLTR_QW1_FDID_MDID_FD;\n\tfd_fltr_ctx->fdid = ICE_FXD_FLTR_QW1_FDID_ZERO;\n}", "static void rtl_hw_start_8168f(struct rtl8169_private *tp)\n{\n\trtl_set_def_aspm_entry_latency(tp);\n\n\trtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);": "static void rtl_hw_start_8168f(struct rtl8169_private *tp)\n{\n\trtl_set_def_aspm_entry_latency(tp);\n\n\trtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);\n\trtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);\n\trtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);\n\trtl_reset_packet_filter(tp);\n\trtl_eri_set_bits(tp, 0x1b0, BIT(4));\n\trtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1));\n\trtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);\n\trtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);\n\n\trtl_disable_clock_request(tp);\n\n\tRTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);\n\tRTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);\n\tRTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);\n\tRTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);\n\n\trtl8168_config_eee_mac(tp);\n}", "static void sw_i2c_wait(void)\n{\n\t/* find a bug:\n\t * peekIO method works well before suspend/resume\n\t * but after suspend, peekIO(0x3ce,0x61) & 0x10": "static void sw_i2c_wait(void)\n{\n\t/* find a bug:\n\t * peekIO method works well before suspend/resume\n\t * but after suspend, peekIO(0x3ce,0x61) & 0x10\n\t * always be non-zero,which makes the while loop\n\t * never finish.\n\t * use non-ultimate for loop below is safe\n\t */\n\n /* Change wait 
algorithm to use PCI bus clock,\n * it's more reliable than counter loop ..\n * write 0x61 to 0x3ce and read from 0x3cf\n */\n\tint i, tmp;\n\n\tfor (i = 0; i < 600; i++) {\n\t\ttmp = i;\n\t\ttmp += i;\n\t}\n}", "static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)\n{\n#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED\tBIT(4)\n#define I40E_FEATURES_ENABLE_PTR\t\t0x2A\n#define I40E_CURRENT_SETTING_PTR\t\t0x2B": "static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)\n{\n#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED\tBIT(4)\n#define I40E_FEATURES_ENABLE_PTR\t\t0x2A\n#define I40E_CURRENT_SETTING_PTR\t\t0x2B\n#define I40E_LINK_BEHAVIOR_WORD_OFFSET\t\t0x2D\n#define I40E_LINK_BEHAVIOR_WORD_LENGTH\t\t0x1\n#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED\tBIT(0)\n#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH\t4\n\ti40e_status read_status = I40E_SUCCESS;\n\tu16 sr_emp_sr_settings_ptr = 0;\n\tu16 features_enable = 0;\n\tu16 link_behavior = 0;\n\tbool ret = false;\n\n\tread_status = i40e_read_nvm_word(&pf->hw,\n\t\t\t\t\t I40E_SR_EMP_SR_SETTINGS_PTR,\n\t\t\t\t\t &sr_emp_sr_settings_ptr);\n\tif (read_status)\n\t\tgoto err_nvm;\n\tread_status = i40e_read_nvm_word(&pf->hw,\n\t\t\t\t\t sr_emp_sr_settings_ptr +\n\t\t\t\t\t I40E_FEATURES_ENABLE_PTR,\n\t\t\t\t\t &features_enable);\n\tif (read_status)\n\t\tgoto err_nvm;\n\tif (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {\n\t\tread_status = i40e_read_nvm_module_data(&pf->hw,\n\t\t\t\t\t\t\tI40E_SR_EMP_SR_SETTINGS_PTR,\n\t\t\t\t\t\t\tI40E_CURRENT_SETTING_PTR,\n\t\t\t\t\t\t\tI40E_LINK_BEHAVIOR_WORD_OFFSET,\n\t\t\t\t\t\t\tI40E_LINK_BEHAVIOR_WORD_LENGTH,\n\t\t\t\t\t\t\t&link_behavior);\n\t\tif (read_status)\n\t\t\tgoto err_nvm;\n\t\tlink_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);\n\t\tret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;\n\t}\n\treturn ret;\n\nerr_nvm:\n\tdev_warn(&pf->pdev->dev,\n\t\t \"total-port-shutdown feature is off due to read nvm error: %s\\n\",\n\t\t 
i40e_stat_str(&pf->hw, read_status));\n\treturn ret;\n}", "static void alc_determine_headset_type(struct hda_codec *codec)\n{\n\tint val;\n\tbool is_ctia = false;\n\tstruct alc_spec *spec = codec->spec;": "static void alc_determine_headset_type(struct hda_codec *codec)\n{\n\tint val;\n\tbool is_ctia = false;\n\tstruct alc_spec *spec = codec->spec;\n\tstatic const struct coef_fw coef0255[] = {\n\t\tWRITE_COEF(0x45, 0xd089), /* combo jack auto switch control(Check type)*/\n\t\tWRITE_COEF(0x49, 0x0149), /* combo jack auto switch control(Vref\n conteol) */\n\t\t{}\n\t};\n\tstatic const struct coef_fw coef0288[] = {\n\t\tUPDATE_COEF(0x4f, 0xfcc0, 0xd400), /* Check Type */\n\t\t{}\n\t};\n\tstatic const struct coef_fw coef0298[] = {\n\t\tUPDATE_COEF(0x50, 0x2000, 0x2000),\n\t\tUPDATE_COEF(0x56, 0x0006, 0x0006),\n\t\tUPDATE_COEF(0x66, 0x0008, 0),\n\t\tUPDATE_COEF(0x67, 0x2000, 0),\n\t\tUPDATE_COEF(0x19, 0x1300, 0x1300),\n\t\t{}\n\t};\n\tstatic const struct coef_fw coef0293[] = {\n\t\tUPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */\n\t\tWRITE_COEF(0x45, 0xD429), /* Set to ctia type */\n\t\t{}\n\t};\n\tstatic const struct coef_fw coef0688[] = {\n\t\tWRITE_COEF(0x11, 0x0001),\n\t\tWRITE_COEF(0xb7, 0x802b),\n\t\tWRITE_COEF(0x15, 0x0d60),\n\t\tWRITE_COEF(0xc3, 0x0c00),\n\t\t{}\n\t};\n\tstatic const struct coef_fw coef0274[] = {\n\t\tUPDATE_COEF(0x4a, 0x0010, 0),\n\t\tUPDATE_COEF(0x4a, 0x8000, 0),\n\t\tWRITE_COEF(0x45, 0xd289),\n\t\tUPDATE_COEF(0x49, 0x0300, 0x0300),\n\t\t{}\n\t};\n\n\tif (spec->no_internal_mic_pin) {\n\t\talc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);\n\t\treturn;\n\t}\n\n\tswitch (codec->core.vendor_id) {\n\tcase 0x10ec0255:\n\t\talc_process_coef_fw(codec, coef0255);\n\t\tmsleep(300);\n\t\tval = alc_read_coef_idx(codec, 0x46);\n\t\tis_ctia = (val & 0x0070) == 0x0070;\n\t\tbreak;\n\tcase 0x10ec0230:\n\tcase 0x10ec0236:\n\tcase 0x10ec0256:\n\t\talc_write_coef_idx(codec, 0x1b, 0x0e4b);\n\t\talc_write_coef_idx(codec, 0x06, 
0x6104);\n\t\talc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);\n\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);\n\t\tmsleep(80);\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);\n\n\t\talc_process_coef_fw(codec, coef0255);\n\t\tmsleep(300);\n\t\tval = alc_read_coef_idx(codec, 0x46);\n\t\tis_ctia = (val & 0x0070) == 0x0070;\n\n\t\talc_write_coefex_idx(codec, 0x57, 0x3, 0x0da3);\n\t\talc_update_coefex_idx(codec, 0x57, 0x5, 1<<14, 0);\n\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);\n\t\tmsleep(80);\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);\n\t\tbreak;\n\tcase 0x10ec0234:\n\tcase 0x10ec0274:\n\tcase 0x10ec0294:\n\t\talc_process_coef_fw(codec, coef0274);\n\t\tmsleep(850);\n\t\tval = alc_read_coef_idx(codec, 0x46);\n\t\tis_ctia = (val & 0x00f0) == 0x00f0;\n\t\tbreak;\n\tcase 0x10ec0233:\n\tcase 0x10ec0283:\n\t\talc_write_coef_idx(codec, 0x45, 0xd029);\n\t\tmsleep(300);\n\t\tval = alc_read_coef_idx(codec, 0x46);\n\t\tis_ctia = (val & 0x0070) == 0x0070;\n\t\tbreak;\n\tcase 0x10ec0298:\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);\n\t\tmsleep(100);\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);\n\t\tmsleep(200);\n\n\t\tval = alc_read_coef_idx(codec, 0x50);\n\t\tif (val & (1 << 12)) {\n\t\t\talc_update_coef_idx(codec, 0x8e, 0x0070, 0x0020);\n\t\t\talc_process_coef_fw(codec, coef0288);\n\t\t\tmsleep(350);\n\t\t\tval = alc_read_coef_idx(codec, 0x50);\n\t\t\tis_ctia = (val & 0x0070) == 0x0070;\n\t\t} else {\n\t\t\talc_update_coef_idx(codec, 0x8e, 0x0070, 0x0010);\n\t\t\talc_process_coef_fw(codec, coef0288);\n\t\t\tmsleep(350);\n\t\t\tval = alc_read_coef_idx(codec, 0x50);\n\t\t\tis_ctia = (val & 0x0070) == 0x0070;\n\t\t}\n\t\talc_process_coef_fw(codec, coef0298);\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t 
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);\n\t\tmsleep(75);\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);\n\t\tbreak;\n\tcase 0x10ec0286:\n\tcase 0x10ec0288:\n\t\talc_process_coef_fw(codec, coef0288);\n\t\tmsleep(350);\n\t\tval = alc_read_coef_idx(codec, 0x50);\n\t\tis_ctia = (val & 0x0070) == 0x0070;\n\t\tbreak;\n\tcase 0x10ec0292:\n\t\talc_write_coef_idx(codec, 0x6b, 0xd429);\n\t\tmsleep(300);\n\t\tval = alc_read_coef_idx(codec, 0x6c);\n\t\tis_ctia = (val & 0x001c) == 0x001c;\n\t\tbreak;\n\tcase 0x10ec0293:\n\t\talc_process_coef_fw(codec, coef0293);\n\t\tmsleep(300);\n\t\tval = alc_read_coef_idx(codec, 0x46);\n\t\tis_ctia = (val & 0x0070) == 0x0070;\n\t\tbreak;\n\tcase 0x10ec0668:\n\t\talc_process_coef_fw(codec, coef0688);\n\t\tmsleep(300);\n\t\tval = alc_read_coef_idx(codec, 0xbe);\n\t\tis_ctia = (val & 0x1c02) == 0x1c02;\n\t\tbreak;\n\tcase 0x10ec0215:\n\tcase 0x10ec0225:\n\tcase 0x10ec0285:\n\tcase 0x10ec0295:\n\tcase 0x10ec0289:\n\tcase 0x10ec0299:\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);\n\t\tmsleep(80);\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);\n\n\t\talc_process_coef_fw(codec, alc225_pre_hsmode);\n\t\talc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);\n\t\tval = alc_read_coef_idx(codec, 0x45);\n\t\tif (val & (1 << 9)) {\n\t\t\talc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x34<<10);\n\t\t\talc_update_coef_idx(codec, 0x49, 3<<8, 2<<8);\n\t\t\tmsleep(800);\n\t\t\tval = alc_read_coef_idx(codec, 0x46);\n\t\t\tis_ctia = (val & 0x00f0) == 0x00f0;\n\t\t} else {\n\t\t\talc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x34<<10);\n\t\t\talc_update_coef_idx(codec, 0x49, 3<<8, 1<<8);\n\t\t\tmsleep(800);\n\t\t\tval = alc_read_coef_idx(codec, 0x46);\n\t\t\tis_ctia = (val & 0x00f0) == 0x00f0;\n\t\t}\n\t\talc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);\n\t\talc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);\n\t\talc_update_coef_idx(codec, 0x67, 
0xf000, 0x3000);\n\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);\n\t\tmsleep(80);\n\t\tsnd_hda_codec_write(codec, 0x21, 0,\n\t\t\t AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);\n\t\tbreak;\n\tcase 0x10ec0867:\n\t\tis_ctia = true;\n\t\tbreak;\n\t}\n\n\tcodec_dbg(codec, \"Headset jack detected iPhone-style headset: %s\\n\",\n\t\t is_ctia ? \"yes\" : \"no\");\n\tspec->current_headset_type = is_ctia ? ALC_HEADSET_TYPE_CTIA : ALC_HEADSET_TYPE_OMTP;\n}", "static int rvu_map_cgx_lmac_pf(struct rvu *rvu)\n{\n\tstruct npc_pkind *pkind = &rvu->hw->pkind;\n\tint cgx_cnt_max = rvu->cgx_cnt_max;\n\tint pf = PF_CGXMAP_BASE;": "static int rvu_map_cgx_lmac_pf(struct rvu *rvu)\n{\n\tstruct npc_pkind *pkind = &rvu->hw->pkind;\n\tint cgx_cnt_max = rvu->cgx_cnt_max;\n\tint pf = PF_CGXMAP_BASE;\n\tunsigned long lmac_bmap;\n\tint size, free_pkind;\n\tint cgx, lmac, iter;\n\tint numvfs, hwvfs;\n\n\tif (!cgx_cnt_max)\n\t\treturn 0;\n\n\tif (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)\n\t\treturn -EINVAL;\n\n\t/* Alloc map table\n\t * An additional entry is required since PF id starts from 1 and\n\t * hence entry at offset 0 is invalid.\n\t */\n\tsize = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);\n\trvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);\n\tif (!rvu->pf2cgxlmac_map)\n\t\treturn -ENOMEM;\n\n\t/* Initialize all entries with an invalid cgx and lmac id */\n\tmemset(rvu->pf2cgxlmac_map, 0xFF, size);\n\n\t/* Reverse map table */\n\trvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,\n\t\t\t\t cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),\n\t\t\t\t GFP_KERNEL);\n\tif (!rvu->cgxlmac2pf_map)\n\t\treturn -ENOMEM;\n\n\trvu->cgx_mapped_pfs = 0;\n\tfor (cgx = 0; cgx < cgx_cnt_max; cgx++) {\n\t\tif (!rvu_cgx_pdata(cgx, rvu))\n\t\t\tcontinue;\n\t\tlmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));\n\t\tfor_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {\n\t\t\tlmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),\n\t\t\t\t\t 
iter);\n\t\t\trvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);\n\t\t\trvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;\n\t\t\tfree_pkind = rvu_alloc_rsrc(&pkind->rsrc);\n\t\t\tpkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;\n\t\t\trvu_map_cgx_nix_block(rvu, pf, cgx, lmac);\n\t\t\trvu->cgx_mapped_pfs++;\n\t\t\trvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);\n\t\t\trvu->cgx_mapped_vfs += numvfs;\n\t\t\tpf++;\n\t\t}\n\t}\n\treturn 0;\n}", "static int set_mpegtei_handling(struct drx_demod_instance *demod)\n{\n\tstruct drxj_data *ext_attr = (struct drxj_data *) (NULL);\n\tstruct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);\n\tint rc;": "static int set_mpegtei_handling(struct drx_demod_instance *demod)\n{\n\tstruct drxj_data *ext_attr = (struct drxj_data *) (NULL);\n\tstruct i2c_device_addr *dev_addr = (struct i2c_device_addr *)(NULL);\n\tint rc;\n\tu16 fec_oc_dpr_mode = 0;\n\tu16 fec_oc_snc_mode = 0;\n\tu16 fec_oc_ems_mode = 0;\n\n\tdev_addr = demod->my_i2c_dev_addr;\n\text_attr = (struct drxj_data *) demod->my_ext_attr;\n\n\trc = drxj_dap_read_reg16(dev_addr, FEC_OC_DPR_MODE__A, &fec_oc_dpr_mode, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_read_reg16(dev_addr, FEC_OC_EMS_MODE__A, &fec_oc_ems_mode, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* reset to default, allow TEI bit to be changed */\n\tfec_oc_dpr_mode &= (~FEC_OC_DPR_MODE_ERR_DISABLE__M);\n\tfec_oc_snc_mode &= (~(FEC_OC_SNC_MODE_ERROR_CTL__M |\n\t\t\t FEC_OC_SNC_MODE_CORR_DISABLE__M));\n\tfec_oc_ems_mode &= (~FEC_OC_EMS_MODE_MODE__M);\n\n\tif (ext_attr->disable_te_ihandling) {\n\t\t/* do not change TEI bit */\n\t\tfec_oc_dpr_mode |= FEC_OC_DPR_MODE_ERR_DISABLE__M;\n\t\tfec_oc_snc_mode |= FEC_OC_SNC_MODE_CORR_DISABLE__M |\n\t\t 
((0x2) << (FEC_OC_SNC_MODE_ERROR_CTL__B));\n\t\tfec_oc_ems_mode |= ((0x01) << (FEC_OC_EMS_MODE_MODE__B));\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, FEC_OC_DPR_MODE__A, fec_oc_dpr_mode, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_MODE__A, fec_oc_snc_mode, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, FEC_OC_EMS_MODE__A, fec_oc_ems_mode, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\treturn 0;\nrw_error:\n\treturn rc;\n}", "static int ntfs_extend_mft(struct ntfs_sb_info *sbi)\n{\n\tint err;\n\tstruct ntfs_inode *ni = sbi->mft.ni;\n\tsize_t new_mft_total;": "static int ntfs_extend_mft(struct ntfs_sb_info *sbi)\n{\n\tint err;\n\tstruct ntfs_inode *ni = sbi->mft.ni;\n\tsize_t new_mft_total;\n\tu64 new_mft_bytes, new_bitmap_bytes;\n\tstruct ATTRIB *attr;\n\tstruct wnd_bitmap *wnd = &sbi->mft.bitmap;\n\n\tnew_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;\n\tnew_mft_bytes = (u64)new_mft_total << sbi->record_bits;\n\n\t/* Step 1: Resize $MFT::DATA. */\n\tdown_write(&ni->file.run_lock);\n\terr = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,\n\t\t\t new_mft_bytes, NULL, false, &attr);\n\n\tif (err) {\n\t\tup_write(&ni->file.run_lock);\n\t\tgoto out;\n\t}\n\n\tattr->nres.valid_size = attr->nres.data_size;\n\tnew_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;\n\tni->mi.dirty = true;\n\n\t/* Step 2: Resize $MFT::BITMAP. */\n\tnew_bitmap_bytes = bitmap_size(new_mft_total);\n\n\terr = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,\n\t\t\t new_bitmap_bytes, &new_bitmap_bytes, true, NULL);\n\n\t/* Refresh MFT Zone if necessary. 
*/\n\tdown_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);\n\n\tntfs_refresh_zone(sbi);\n\n\tup_write(&sbi->used.bitmap.rw_lock);\n\tup_write(&ni->file.run_lock);\n\n\tif (err)\n\t\tgoto out;\n\n\terr = wnd_extend(wnd, new_mft_total);\n\n\tif (err)\n\t\tgoto out;\n\n\tntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);\n\n\terr = _ni_write_inode(&ni->vfs_inode, 0);\nout:\n\treturn err;\n}", "static int __init test_f5(struct crypto_shash *tfm_cmac)\n{\n\tconst u8 w[32] = {\n\t\t\t0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,\n\t\t\t0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,": "static int __init test_f5(struct crypto_shash *tfm_cmac)\n{\n\tconst u8 w[32] = {\n\t\t\t0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,\n\t\t\t0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99,\n\t\t\t0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,\n\t\t\t0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };\n\tconst u8 n1[16] = {\n\t\t\t0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,\n\t\t\t0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };\n\tconst u8 n2[16] = {\n\t\t\t0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,\n\t\t\t0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };\n\tconst u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };\n\tconst u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };\n\tconst u8 exp_ltk[16] = {\n\t\t\t0x38, 0x0a, 0x75, 0x94, 0xb5, 0x22, 0x05, 0x98,\n\t\t\t0x23, 0xcd, 0xd7, 0x69, 0x11, 0x79, 0x86, 0x69 };\n\tconst u8 exp_mackey[16] = {\n\t\t\t0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,\n\t\t\t0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };\n\tu8 mackey[16], ltk[16];\n\tint err;\n\n\terr = smp_f5(tfm_cmac, w, n1, n2, a1, a2, mackey, ltk);\n\tif (err)\n\t\treturn err;\n\n\tif (crypto_memneq(mackey, exp_mackey, 16))\n\t\treturn -EINVAL;\n\n\tif (crypto_memneq(ltk, exp_ltk, 16))\n\t\treturn -EINVAL;\n\n\treturn 0;\n}", "static int hdsp_external_sample_rate(struct hdsp *hdsp)\n{\n\tunsigned int status2 = hdsp_read(hdsp, 
HDSP_status2Register);\n\tunsigned int rate_bits = status2 & HDSP_systemFrequencyMask;\n": "static int hdsp_external_sample_rate(struct hdsp *hdsp)\n{\n\tunsigned int status2 = hdsp_read(hdsp, HDSP_status2Register);\n\tunsigned int rate_bits = status2 & HDSP_systemFrequencyMask;\n\n\t/* For the 9632 card, there seems to be no bit for indicating external\n\t * sample rate greater than 96kHz. The card reports the corresponding\n\t * single speed. So the best means seems to get spdif rate when\n\t * autosync reference is spdif */\n\tif (hdsp->io_type == H9632 &&\n\t hdsp_autosync_ref(hdsp) == HDSP_AUTOSYNC_FROM_SPDIF)\n\t\t return hdsp_spdif_sample_rate(hdsp);\n\n\tswitch (rate_bits) {\n\tcase HDSP_systemFrequency32: return 32000;\n\tcase HDSP_systemFrequency44_1: return 44100;\n\tcase HDSP_systemFrequency48: return 48000;\n\tcase HDSP_systemFrequency64: return 64000;\n\tcase HDSP_systemFrequency88_2: return 88200;\n\tcase HDSP_systemFrequency96: return 96000;\n\tdefault:\n\t\treturn 0;\n\t}\n}", "static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)\n{\n\tstruct npc_mcam *mcam = &rvu->hw->mcam;\n\tu8 lid, lt, ld, bitnr;\n\tu8 key_nibble = 0;": "static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)\n{\n\tstruct npc_mcam *mcam = &rvu->hw->mcam;\n\tu8 lid, lt, ld, bitnr;\n\tu8 key_nibble = 0;\n\tu64 cfg;\n\n\t/* Scan and note how parse result is going to be in key.\n\t * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from\n\t * parse result in the key. 
The enabled nibbles from parse result\n\t * will be concatenated in key.\n\t */\n\tcfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));\n\tcfg &= NPC_PARSE_NIBBLE;\n\tfor_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {\n\t\tnpc_scan_parse_result(mcam, bitnr, key_nibble, intf);\n\t\tkey_nibble++;\n\t}\n\n\t/* Scan and note how layer data is going to be in key */\n\tfor (lid = 0; lid < NPC_MAX_LID; lid++) {\n\t\tfor (lt = 0; lt < NPC_MAX_LT; lt++) {\n\t\t\tfor (ld = 0; ld < NPC_MAX_LD; ld++) {\n\t\t\t\tcfg = rvu_read64(rvu, blkaddr,\n\t\t\t\t\t\t NPC_AF_INTFX_LIDX_LTX_LDX_CFG\n\t\t\t\t\t\t (intf, lid, lt, ld));\n\t\t\t\tif (!FIELD_GET(NPC_LDATA_EN, cfg))\n\t\t\t\t\tcontinue;\n\t\t\t\tnpc_scan_ldata(rvu, blkaddr, lid, lt, cfg,\n\t\t\t\t\t intf);\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 0;\n}", "static void serial_out(struct uart_sunsu_port *up, int offset, int value)\n{\n#ifndef CONFIG_SPARC64\n\t/*\n\t * MrCoffee has weird schematics: IRQ4 & P10(?) pins of SuperIO are": "static void serial_out(struct uart_sunsu_port *up, int offset, int value)\n{\n#ifndef CONFIG_SPARC64\n\t/*\n\t * MrCoffee has weird schematics: IRQ4 & P10(?) pins of SuperIO are\n\t * connected with a gate then go to SlavIO. When IRQ4 goes tristated\n\t * gate outputs a logical one. Since we use level triggered interrupts\n\t * we have lockup and watchdog reset. 
We cannot mask IRQ because\n\t * keyboard shares IRQ with us (Word has it as Bob Smelik's design).\n\t * This problem is similar to what Alpha people suffer, see\n\t * 8250_alpha.c.\n\t */\n\tif (offset == UART_MCR)\n\t\tvalue |= UART_MCR_OUT2;\n#endif\n\toffset <<= up->port.regshift;\n\n\tswitch (up->port.iotype) {\n\tcase UPIO_HUB6:\n\t\toutb(up->port.hub6 - 1 + offset, up->port.iobase);\n\t\toutb(value, up->port.iobase + 1);\n\t\tbreak;\n\n\tcase UPIO_MEM:\n\t\twriteb(value, up->port.membase + offset);\n\t\tbreak;\n\n\tdefault:\n\t\toutb(value, up->port.iobase + offset);\n\t}\n}", "static int __init test_f6(struct crypto_shash *tfm_cmac)\n{\n\tconst u8 w[16] = {\n\t\t\t0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,\n\t\t\t0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };": "static int __init test_f6(struct crypto_shash *tfm_cmac)\n{\n\tconst u8 w[16] = {\n\t\t\t0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,\n\t\t\t0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 };\n\tconst u8 n1[16] = {\n\t\t\t0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff,\n\t\t\t0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 };\n\tconst u8 n2[16] = {\n\t\t\t0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21,\n\t\t\t0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 };\n\tconst u8 r[16] = {\n\t\t\t0xc8, 0x0f, 0x2d, 0x0c, 0xd2, 0x42, 0xda, 0x08,\n\t\t\t0x54, 0xbb, 0x53, 0xb4, 0x3b, 0x34, 0xa3, 0x12 };\n\tconst u8 io_cap[3] = { 0x02, 0x01, 0x01 };\n\tconst u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 };\n\tconst u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 };\n\tconst u8 exp[16] = {\n\t\t\t0x61, 0x8f, 0x95, 0xda, 0x09, 0x0b, 0x6c, 0xd2,\n\t\t\t0xc5, 0xe8, 0xd0, 0x9c, 0x98, 0x73, 0xc4, 0xe3 };\n\tu8 res[16];\n\tint err;\n\n\terr = smp_f6(tfm_cmac, w, n1, n2, r, io_cap, a1, a2, res);\n\tif (err)\n\t\treturn err;\n\n\tif (crypto_memneq(res, exp, 16))\n\t\treturn -EINVAL;\n\n\treturn 0;\n}", "static int INIT get_next_block(struct bunzip_data *bd)\n{\n\tstruct group_data *hufGroup = 
NULL;\n\tint *base = NULL;\n\tint *limit = NULL;": "static int INIT get_next_block(struct bunzip_data *bd)\n{\n\tstruct group_data *hufGroup = NULL;\n\tint *base = NULL;\n\tint *limit = NULL;\n\tint dbufCount, nextSym, dbufSize, groupCount, selector,\n\t\ti, j, k, t, runPos, symCount, symTotal, nSelectors, *byteCount;\n\tunsigned char uc, *symToByte, *mtfSymbol, *selectors;\n\tunsigned int *dbuf, origPtr;\n\n\tdbuf = bd->dbuf;\n\tdbufSize = bd->dbufSize;\n\tselectors = bd->selectors;\n\tbyteCount = bd->byteCount;\n\tsymToByte = bd->symToByte;\n\tmtfSymbol = bd->mtfSymbol;\n\n\t/* Read in header signature and CRC, then validate signature.\n\t (last block signature means CRC is for whole file, return now) */\n\ti = get_bits(bd, 24);\n\tj = get_bits(bd, 24);\n\tbd->headerCRC = get_bits(bd, 32);\n\tif ((i == 0x177245) && (j == 0x385090))\n\t\treturn RETVAL_LAST_BLOCK;\n\tif ((i != 0x314159) || (j != 0x265359))\n\t\treturn RETVAL_NOT_BZIP_DATA;\n\t/* We can add support for blockRandomised if anybody complains.\n\t There was some code for this in busybox 1.0.0-pre3, but nobody ever\n\t noticed that it didn't actually work. */\n\tif (get_bits(bd, 1))\n\t\treturn RETVAL_OBSOLETE_INPUT;\n\torigPtr = get_bits(bd, 24);\n\tif (origPtr >= dbufSize)\n\t\treturn RETVAL_DATA_ERROR;\n\t/* mapping table: if some byte values are never used (encoding things\n\t like ascii text), the compression code removes the gaps to have fewer\n\t symbols to deal with, and writes a sparse bitfield indicating which\n\t values were present. We make a translation table to convert the\n\t symbols back to the corresponding bytes. */\n\tt = get_bits(bd, 16);\n\tsymTotal = 0;\n\tfor (i = 0; i < 16; i++) {\n\t\tif (t&(1 << (15-i))) {\n\t\t\tk = get_bits(bd, 16);\n\t\t\tfor (j = 0; j < 16; j++)\n\t\t\t\tif (k&(1 << (15-j)))\n\t\t\t\t\tsymToByte[symTotal++] = (16*i)+j;\n\t\t}\n\t}\n\t/* How many different Huffman coding groups does this block use? 
*/\n\tgroupCount = get_bits(bd, 3);\n\tif (groupCount < 2 || groupCount > MAX_GROUPS)\n\t\treturn RETVAL_DATA_ERROR;\n\t/* nSelectors: Every GROUP_SIZE many symbols we select a new\n\t Huffman coding group. Read in the group selector list,\n\t which is stored as MTF encoded bit runs. (MTF = Move To\n\t Front, as each value is used it's moved to the start of the\n\t list.) */\n\tnSelectors = get_bits(bd, 15);\n\tif (!nSelectors)\n\t\treturn RETVAL_DATA_ERROR;\n\tfor (i = 0; i < groupCount; i++)\n\t\tmtfSymbol[i] = i;\n\tfor (i = 0; i < nSelectors; i++) {\n\t\t/* Get next value */\n\t\tfor (j = 0; get_bits(bd, 1); j++)\n\t\t\tif (j >= groupCount)\n\t\t\t\treturn RETVAL_DATA_ERROR;\n\t\t/* Decode MTF to get the next selector */\n\t\tuc = mtfSymbol[j];\n\t\tfor (; j; j--)\n\t\t\tmtfSymbol[j] = mtfSymbol[j-1];\n\t\tmtfSymbol[0] = selectors[i] = uc;\n\t}\n\t/* Read the Huffman coding tables for each group, which code\n\t for symTotal literal symbols, plus two run symbols (RUNA,\n\t RUNB) */\n\tsymCount = symTotal+2;\n\tfor (j = 0; j < groupCount; j++) {\n\t\tunsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];\n\t\tint\tminLen,\tmaxLen, pp;\n\t\t/* Read Huffman code lengths for each symbol. They're\n\t\t stored in a way similar to mtf; record a starting\n\t\t value for the first symbol, and an offset from the\n\t\t previous value for everys symbol after that.\n\t\t (Subtracting 1 before the loop and then adding it\n\t\t back at the end is an optimization that makes the\n\t\t test inside the loop simpler: symbol length 0\n\t\t becomes negative, so an unsigned inequality catches\n\t\t it.) */\n\t\tt = get_bits(bd, 5)-1;\n\t\tfor (i = 0; i < symCount; i++) {\n\t\t\tfor (;;) {\n\t\t\t\tif (((unsigned)t) > (MAX_HUFCODE_BITS-1))\n\t\t\t\t\treturn RETVAL_DATA_ERROR;\n\n\t\t\t\t/* If first bit is 0, stop. 
Else\n\t\t\t\t second bit indicates whether to\n\t\t\t\t increment or decrement the value.\n\t\t\t\t Optimization: grab 2 bits and unget\n\t\t\t\t the second if the first was 0. */\n\n\t\t\t\tk = get_bits(bd, 2);\n\t\t\t\tif (k < 2) {\n\t\t\t\t\tbd->inbufBitCount++;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t/* Add one if second bit 1, else\n\t\t\t\t * subtract 1. Avoids if/else */\n\t\t\t\tt += (((k+1)&2)-1);\n\t\t\t}\n\t\t\t/* Correct for the initial -1, to get the\n\t\t\t * final symbol length */\n\t\t\tlength[i] = t+1;\n\t\t}\n\t\t/* Find largest and smallest lengths in this group */\n\t\tminLen = maxLen = length[0];\n\n\t\tfor (i = 1; i < symCount; i++) {\n\t\t\tif (length[i] > maxLen)\n\t\t\t\tmaxLen = length[i];\n\t\t\telse if (length[i] < minLen)\n\t\t\t\tminLen = length[i];\n\t\t}\n\n\t\t/* Calculate permute[], base[], and limit[] tables from\n\t\t * length[].\n\t\t *\n\t\t * permute[] is the lookup table for converting\n\t\t * Huffman coded symbols into decoded symbols. base[]\n\t\t * is the amount to subtract from the value of a\n\t\t * Huffman symbol of a given length when using\n\t\t * permute[].\n\t\t *\n\t\t * limit[] indicates the largest numerical value a\n\t\t * symbol with a given number of bits can have. This\n\t\t * is how the Huffman codes can vary in length: each\n\t\t * code with a value > limit[length] needs another\n\t\t * bit.\n\t\t */\n\t\thufGroup = bd->groups+j;\n\t\thufGroup->minLen = minLen;\n\t\thufGroup->maxLen = maxLen;\n\t\t/* Note that minLen can't be smaller than 1, so we\n\t\t adjust the base and limit array pointers so we're\n\t\t not always wasting the first entry. We do this\n\t\t again when using them (during symbol decoding).*/\n\t\tbase = hufGroup->base-1;\n\t\tlimit = hufGroup->limit-1;\n\t\t/* Calculate permute[]. Concurrently, initialize\n\t\t * temp[] and limit[]. 
*/\n\t\tpp = 0;\n\t\tfor (i = minLen; i <= maxLen; i++) {\n\t\t\ttemp[i] = limit[i] = 0;\n\t\t\tfor (t = 0; t < symCount; t++)\n\t\t\t\tif (length[t] == i)\n\t\t\t\t\thufGroup->permute[pp++] = t;\n\t\t}\n\t\t/* Count symbols coded for at each bit length */\n\t\tfor (i = 0; i < symCount; i++)\n\t\t\ttemp[length[i]]++;\n\t\t/* Calculate limit[] (the largest symbol-coding value\n\t\t *at each bit length, which is (previous limit <<\n\t\t *1)+symbols at this level), and base[] (number of\n\t\t *symbols to ignore at each bit length, which is limit\n\t\t *minus the cumulative count of symbols coded for\n\t\t *already). */\n\t\tpp = t = 0;\n\t\tfor (i = minLen; i < maxLen; i++) {\n\t\t\tpp += temp[i];\n\t\t\t/* We read the largest possible symbol size\n\t\t\t and then unget bits after determining how\n\t\t\t many we need, and those extra bits could be\n\t\t\t set to anything. (They're noise from\n\t\t\t future symbols.) At each level we're\n\t\t\t really only interested in the first few\n\t\t\t bits, so here we set all the trailing\n\t\t\t to-be-ignored bits to 1 so they don't\n\t\t\t affect the value > limit[length]\n\t\t\t comparison. */\n\t\t\tlimit[i] = (pp << (maxLen - i)) - 1;\n\t\t\tpp <<= 1;\n\t\t\tbase[i+1] = pp-(t += temp[i]);\n\t\t}\n\t\tlimit[maxLen+1] = INT_MAX; /* Sentinel value for\n\t\t\t\t\t * reading next sym. */\n\t\tlimit[maxLen] = pp+temp[maxLen]-1;\n\t\tbase[minLen] = 0;\n\t}\n\t/* We've finished reading and digesting the block header. Now\n\t read this block's Huffman coded symbols from the file and\n\t undo the Huffman coding and run length encoding, saving the\n\t result into dbuf[dbufCount++] = uc */\n\n\t/* Initialize symbol occurrence counters and symbol Move To\n\t * Front table */\n\tfor (i = 0; i < 256; i++) {\n\t\tbyteCount[i] = 0;\n\t\tmtfSymbol[i] = (unsigned char)i;\n\t}\n\t/* Loop through compressed symbols. */\n\trunPos = dbufCount = symCount = selector = 0;\n\tfor (;;) {\n\t\t/* Determine which Huffman coding group to use. 
*/\n\t\tif (!(symCount--)) {\n\t\t\tsymCount = GROUP_SIZE-1;\n\t\t\tif (selector >= nSelectors)\n\t\t\t\treturn RETVAL_DATA_ERROR;\n\t\t\thufGroup = bd->groups+selectors[selector++];\n\t\t\tbase = hufGroup->base-1;\n\t\t\tlimit = hufGroup->limit-1;\n\t\t}\n\t\t/* Read next Huffman-coded symbol. */\n\t\t/* Note: It is far cheaper to read maxLen bits and\n\t\t back up than it is to read minLen bits and then an\n\t\t additional bit at a time, testing as we go.\n\t\t Because there is a trailing last block (with file\n\t\t CRC), there is no danger of the overread causing an\n\t\t unexpected EOF for a valid compressed file. As a\n\t\t further optimization, we do the read inline\n\t\t (falling back to a call to get_bits if the buffer\n\t\t runs dry). The following (up to got_huff_bits:) is\n\t\t equivalent to j = get_bits(bd, hufGroup->maxLen);\n\t\t */\n\t\twhile (bd->inbufBitCount < hufGroup->maxLen) {\n\t\t\tif (bd->inbufPos == bd->inbufCount) {\n\t\t\t\tj = get_bits(bd, hufGroup->maxLen);\n\t\t\t\tgoto got_huff_bits;\n\t\t\t}\n\t\t\tbd->inbufBits =\n\t\t\t\t(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];\n\t\t\tbd->inbufBitCount += 8;\n\t\t}\n\t\tbd->inbufBitCount -= hufGroup->maxLen;\n\t\tj = (bd->inbufBits >> bd->inbufBitCount)&\n\t\t\t((1 << hufGroup->maxLen)-1);\ngot_huff_bits:\n\t\t/* Figure how many bits are in next symbol and\n\t\t * unget extras */\n\t\ti = hufGroup->minLen;\n\t\twhile (j > limit[i])\n\t\t\t++i;\n\t\tbd->inbufBitCount += (hufGroup->maxLen - i);\n\t\t/* Huffman decode value to get nextSym (with bounds checking) */\n\t\tif ((i > hufGroup->maxLen)\n\t\t\t|| (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))\n\t\t\t\t>= MAX_SYMBOLS))\n\t\t\treturn RETVAL_DATA_ERROR;\n\t\tnextSym = hufGroup->permute[j];\n\t\t/* We have now decoded the symbol, which indicates\n\t\t either a new literal byte, or a repeated run of the\n\t\t most recent literal byte. 
First, check if nextSym\n\t\t indicates a repeated run, and if so loop collecting\n\t\t how many times to repeat the last literal. */\n\t\tif (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */\n\t\t\t/* If this is the start of a new run, zero out\n\t\t\t * counter */\n\t\t\tif (!runPos) {\n\t\t\t\trunPos = 1;\n\t\t\t\tt = 0;\n\t\t\t}\n\t\t\t/* Neat trick that saves 1 symbol: instead of\n\t\t\t or-ing 0 or 1 at each bit position, add 1\n\t\t\t or 2 instead. For example, 1011 is 1 << 0\n\t\t\t + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1\n\t\t\t + 1 << 2. You can make any bit pattern\n\t\t\t that way using 1 less symbol than the basic\n\t\t\t or 0/1 method (except all bits 0, which\n\t\t\t would use no symbols, but a run of length 0\n\t\t\t doesn't mean anything in this context).\n\t\t\t Thus space is saved. */\n\t\t\tt += (runPos << nextSym);\n\t\t\t/* +runPos if RUNA; +2*runPos if RUNB */\n\n\t\t\trunPos <<= 1;\n\t\t\tcontinue;\n\t\t}\n\t\t/* When we hit the first non-run symbol after a run,\n\t\t we now know how many times to repeat the last\n\t\t literal, so append that many copies to our buffer\n\t\t of decoded symbols (dbuf) now. (The last literal\n\t\t used is the one at the head of the mtfSymbol\n\t\t array.) */\n\t\tif (runPos) {\n\t\t\trunPos = 0;\n\t\t\tif (dbufCount+t >= dbufSize)\n\t\t\t\treturn RETVAL_DATA_ERROR;\n\n\t\t\tuc = symToByte[mtfSymbol[0]];\n\t\t\tbyteCount[uc] += t;\n\t\t\twhile (t--)\n\t\t\t\tdbuf[dbufCount++] = uc;\n\t\t}\n\t\t/* Is this the terminating symbol? */\n\t\tif (nextSym > symTotal)\n\t\t\tbreak;\n\t\t/* At this point, nextSym indicates a new literal\n\t\t character. Subtract one to get the position in the\n\t\t MTF array at which this literal is currently to be\n\t\t found. (Note that the result can't be -1 or 0,\n\t\t because 0 and 1 are RUNA and RUNB. But another\n\t\t instance of the first symbol in the mtf array,\n\t\t position 0, would have been handled as part of a\n\t\t run above. 
Therefore 1 unused mtf position minus 2\n\t\t non-literal nextSym values equals -1.) */\n\t\tif (dbufCount >= dbufSize)\n\t\t\treturn RETVAL_DATA_ERROR;\n\t\ti = nextSym - 1;\n\t\tuc = mtfSymbol[i];\n\t\t/* Adjust the MTF array. Since we typically expect to\n\t\t *move only a small number of symbols, and are bound\n\t\t *by 256 in any case, using memmove here would\n\t\t *typically be bigger and slower due to function call\n\t\t *overhead and other assorted setup costs. */\n\t\tdo {\n\t\t\tmtfSymbol[i] = mtfSymbol[i-1];\n\t\t} while (--i);\n\t\tmtfSymbol[0] = uc;\n\t\tuc = symToByte[uc];\n\t\t/* We have our literal byte. Save it into dbuf. */\n\t\tbyteCount[uc]++;\n\t\tdbuf[dbufCount++] = (unsigned int)uc;\n\t}\n\t/* At this point, we've read all the Huffman-coded symbols\n\t (and repeated runs) for this block from the input stream,\n\t and decoded them into the intermediate buffer. There are\n\t dbufCount many decoded bytes in dbuf[]. Now undo the\n\t Burrows-Wheeler transform on dbuf. See\n\t http://dogma.net/markn/articles/bwt/bwt.htm\n\t */\n\t/* Turn byteCount into cumulative occurrence counts of 0 to n-1. */\n\tj = 0;\n\tfor (i = 0; i < 256; i++) {\n\t\tk = j+byteCount[i];\n\t\tbyteCount[i] = j;\n\t\tj = k;\n\t}\n\t/* Figure out what order dbuf would be in if we sorted it. */\n\tfor (i = 0; i < dbufCount; i++) {\n\t\tuc = (unsigned char)(dbuf[i] & 0xff);\n\t\tdbuf[byteCount[uc]] |= (i << 8);\n\t\tbyteCount[uc]++;\n\t}\n\t/* Decode first byte by hand to initialize \"previous\" byte.\n\t Note that it doesn't get output, and if the first three\n\t characters are identical it doesn't qualify as a run (hence\n\t writeRunCountdown = 5). 
*/\n\tif (dbufCount) {\n\t\tif (origPtr >= dbufCount)\n\t\t\treturn RETVAL_DATA_ERROR;\n\t\tbd->writePos = dbuf[origPtr];\n\t\tbd->writeCurrent = (unsigned char)(bd->writePos&0xff);\n\t\tbd->writePos >>= 8;\n\t\tbd->writeRunCountdown = 5;\n\t}\n\tbd->writeCount = dbufCount;\n\n\treturn RETVAL_OK;\n}", "static int io_splice(struct io_kiocb *req, unsigned int issue_flags)\n{\n\tstruct io_splice *sp = &req->splice;\n\tstruct file *out = sp->file_out;\n\tunsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;": "static int io_splice(struct io_kiocb *req, unsigned int issue_flags)\n{\n\tstruct io_splice *sp = &req->splice;\n\tstruct file *out = sp->file_out;\n\tunsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;\n\tloff_t *poff_in, *poff_out;\n\tstruct file *in;\n\tlong ret = 0;\n\n\tif (issue_flags & IO_URING_F_NONBLOCK)\n\t\treturn -EAGAIN;\n\n\tif (sp->flags & SPLICE_F_FD_IN_FIXED)\n\t\tin = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);\n\telse\n\t\tin = io_file_get_normal(req, sp->splice_fd_in);\n\tif (!in) {\n\t\tret = -EBADF;\n\t\tgoto done;\n\t}\n\n\tpoff_in = (sp->off_in == -1) ? NULL : &sp->off_in;\n\tpoff_out = (sp->off_out == -1) ? 
NULL : &sp->off_out;\n\n\tif (sp->len)\n\t\tret = do_splice(in, poff_in, out, poff_out, sp->len, flags);\n\n\tif (!(sp->flags & SPLICE_F_FD_IN_FIXED))\n\t\tio_put_file(in);\ndone:\n\tif (ret != sp->len)\n\t\treq_set_fail(req);\n\t__io_req_complete(req, 0, ret, 0);\n\treturn 0;\n}", "static void test_copy_key(void)\n{\n\tstruct test_default t = test_default_init(guest_copy_key);\n\n\tHOST_SYNC(t.vcpu, STAGE_SKEYS_SET);": "static void test_copy_key(void)\n{\n\tstruct test_default t = test_default_init(guest_copy_key);\n\n\tHOST_SYNC(t.vcpu, STAGE_SKEYS_SET);\n\n\t/* vm, no key */\n\tDEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size);\n\n\t/* vm/vcpu, machting key or key 0 */\n\tDEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(0));\n\tDEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, t.size, KEY(9));\n\tDEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(0));\n\tDEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, t.size, KEY(9));\n\t/*\n\t * There used to be different code paths for key handling depending on\n\t * if the region crossed a page boundary.\n\t * There currently are not, but the more tests the merrier.\n\t */\n\tDEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(0));\n\tDEFAULT_WRITE_READ(t.vcpu, t.vcpu, LOGICAL, 1, KEY(9));\n\tDEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(0));\n\tDEFAULT_WRITE_READ(t.vcpu, t.vm, ABSOLUTE, 1, KEY(9));\n\n\t/* vm/vcpu, mismatching keys on read, but no fetch protection */\n\tDEFAULT_READ(t.vcpu, t.vcpu, LOGICAL, t.size, GADDR_V(mem2), KEY(2));\n\tDEFAULT_READ(t.vcpu, t.vm, ABSOLUTE, t.size, GADDR_V(mem1), KEY(2));\n\n\tkvm_vm_free(t.kvm_vm);\n}", "static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)\n{\n\tstruct bpf_insn *insn = self->fill_insns;\n\t/* jump range is limited to 16 bit. 
every ld_abs is replaced by 6 insns,\n\t * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted": "static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)\n{\n\tstruct bpf_insn *insn = self->fill_insns;\n\t/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,\n\t * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted\n\t * to extend the error value of the inlined ld_abs sequence which then\n\t * contains 7 insns. so, set the dividend to 7 so the testcase could\n\t * work on all arches.\n\t */\n\tunsigned int len = (1 << 15) / 7;\n\tint i = 0;\n\n\tinsn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);\n\tinsn[i++] = BPF_LD_ABS(BPF_B, 0);\n\tinsn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);\n\ti++;\n\twhile (i < len - 1)\n\t\tinsn[i++] = BPF_LD_ABS(BPF_B, 1);\n\tinsn[i] = BPF_EXIT_INSN();\n\tself->prog_len = i + 1;\n}", "static int __init thermal_init(struct ibm_init_struct *iibm)\n{\n\tu8 t, ta1, ta2, ver = 0;\n\tint i;\n\tint acpi_tmp7;": "static int __init thermal_init(struct ibm_init_struct *iibm)\n{\n\tu8 t, ta1, ta2, ver = 0;\n\tint i;\n\tint acpi_tmp7;\n\n\tvdbg_printk(TPACPI_DBG_INIT, \"initializing thermal subdriver\\n\");\n\n\tacpi_tmp7 = acpi_evalf(ec_handle, NULL, \"TMP7\", \"qv\");\n\n\tif (thinkpad_id.ec_model) {\n\t\t/*\n\t\t * Direct EC access mode: sensors at registers\n\t\t * 0x78-0x7F, 0xC0-0xC7. Registers return 0x00 for\n\t\t * non-implemented, thermal sensors return 0x80 when\n\t\t * not available\n\t\t * The above rule is unfortunately flawed. 
This has been seen with\n\t\t * 0xC2 (power supply ID) causing thermal control problems.\n\t\t * The EC version can be determined by offset 0xEF and at least for\n\t\t * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7\n\t\t * are not thermal registers.\n\t\t */\n\t\tif (!acpi_ec_read(TP_EC_FUNCREV, &ver))\n\t\t\tpr_warn(\"Thinkpad ACPI EC unable to access EC version\\n\");\n\n\t\tta1 = ta2 = 0;\n\t\tfor (i = 0; i < 8; i++) {\n\t\t\tif (acpi_ec_read(TP_EC_THERMAL_TMP0 + i, &t)) {\n\t\t\t\tta1 |= t;\n\t\t\t} else {\n\t\t\t\tta1 = 0;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tif (ver < 3) {\n\t\t\t\tif (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {\n\t\t\t\t\tta2 |= t;\n\t\t\t\t} else {\n\t\t\t\t\tta1 = 0;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif (ta1 == 0) {\n\t\t\t/* This is sheer paranoia, but we handle it anyway */\n\t\t\tif (acpi_tmp7) {\n\t\t\t\tpr_err(\"ThinkPad ACPI EC access misbehaving, falling back to ACPI TMPx access mode\\n\");\n\t\t\t\tthermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;\n\t\t\t} else {\n\t\t\t\tpr_err(\"ThinkPad ACPI EC access misbehaving, disabling thermal sensors access\\n\");\n\t\t\t\tthermal_read_mode = TPACPI_THERMAL_NONE;\n\t\t\t}\n\t\t} else {\n\t\t\tif (ver >= 3) {\n\t\t\t\tthermal_read_mode = TPACPI_THERMAL_TPEC_8;\n\t\t\t\tthermal_use_labels = true;\n\t\t\t} else {\n\t\t\t\tthermal_read_mode =\n\t\t\t\t\t(ta2 != 0) ?\n\t\t\t\t\tTPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;\n\t\t\t}\n\t\t}\n\t} else if (acpi_tmp7) {\n\t\tif (tpacpi_is_ibm() &&\n\t\t acpi_evalf(ec_handle, NULL, \"UPDT\", \"qv\")) {\n\t\t\t/* 600e/x, 770e, 770x */\n\t\t\tthermal_read_mode = TPACPI_THERMAL_ACPI_UPDT;\n\t\t} else {\n\t\t\t/* IBM/LENOVO DSDT EC.TMPx access, max 8 sensors */\n\t\t\tthermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;\n\t\t}\n\t} else {\n\t\t/* temperatures not supported on 570, G4x, R30, R31, R32 */\n\t\tthermal_read_mode = TPACPI_THERMAL_NONE;\n\t}\n\n\tvdbg_printk(TPACPI_DBG_INIT, \"thermal is %s, mode 
%d\\n\",\n\t\tstr_supported(thermal_read_mode != TPACPI_THERMAL_NONE),\n\t\tthermal_read_mode);\n\n\treturn thermal_read_mode != TPACPI_THERMAL_NONE ? 0 : -ENODEV;\n}", "static void bpf_tramp_image_put(struct bpf_tramp_image *im)\n{\n\t/* The trampoline image that calls original function is using:\n\t * rcu_read_lock_trace to protect sleepable bpf progs\n\t * rcu_read_lock to protect normal bpf progs": "static void bpf_tramp_image_put(struct bpf_tramp_image *im)\n{\n\t/* The trampoline image that calls original function is using:\n\t * rcu_read_lock_trace to protect sleepable bpf progs\n\t * rcu_read_lock to protect normal bpf progs\n\t * percpu_ref to protect trampoline itself\n\t * rcu tasks to protect trampoline asm not covered by percpu_ref\n\t * (which are few asm insns before __bpf_tramp_enter and\n\t * after __bpf_tramp_exit)\n\t *\n\t * The trampoline is unreachable before bpf_tramp_image_put().\n\t *\n\t * First, patch the trampoline to avoid calling into fexit progs.\n\t * The progs will be freed even if the original function is still\n\t * executing or sleeping.\n\t * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on\n\t * first few asm instructions to execute and call into\n\t * __bpf_tramp_enter->percpu_ref_get.\n\t * Then use percpu_ref_kill to wait for the trampoline and the original\n\t * function to finish.\n\t * Then use call_rcu_tasks() to make sure few asm insns in\n\t * the trampoline epilogue are done as well.\n\t *\n\t * In !PREEMPT case the task that got interrupted in the first asm\n\t * insns won't go through an RCU quiescent state which the\n\t * percpu_ref_kill will be waiting for. 
Hence the first\n\t * call_rcu_tasks() is not necessary.\n\t */\n\tif (im->ip_after_call) {\n\t\tint err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,\n\t\t\t\t\t NULL, im->ip_epilogue);\n\t\tWARN_ON(err);\n\t\tif (IS_ENABLED(CONFIG_PREEMPTION))\n\t\t\tcall_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);\n\t\telse\n\t\t\tpercpu_ref_kill(&im->pcref);\n\t\treturn;\n\t}\n\n\t/* The trampoline without fexit and fmod_ret progs doesn't call original\n\t * function and doesn't use percpu_ref.\n\t * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.\n\t * Then use call_rcu_tasks() to wait for the rest of trampoline asm\n\t * and normal progs.\n\t */\n\tcall_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);\n}", "static int __init hotkey_init(struct ibm_init_struct *iibm)\n{\n\t/* Requirements for changing the default keymaps:\n\t *\n\t * 1. Many of the keys are mapped to KEY_RESERVED for very": "static int __init hotkey_init(struct ibm_init_struct *iibm)\n{\n\t/* Requirements for changing the default keymaps:\n\t *\n\t * 1. Many of the keys are mapped to KEY_RESERVED for very\n\t * good reasons. Do not change them unless you have deep\n\t * knowledge on the IBM and Lenovo ThinkPad firmware for\n\t * the various ThinkPad models. The driver behaves\n\t * differently for KEY_RESERVED: such keys have their\n\t * hot key mask *unset* in mask_recommended, and also\n\t * in the initial hot key mask programmed into the\n\t * firmware at driver load time, which means the firm-\n\t * ware may react very differently if you change them to\n\t * something else;\n\t *\n\t * 2. You must be subscribed to the linux-thinkpad and\n\t * ibm-acpi-devel mailing lists, and you should read the\n\t * list archives since 2007 if you want to change the\n\t * keymaps. 
This requirement exists so that you will\n\t * know the past history of problems with the thinkpad-\n\t * acpi driver keymaps, and also that you will be\n\t * listening to any bug reports;\n\t *\n\t * 3. Do not send thinkpad-acpi specific patches directly to\n\t * for merging, *ever*. Send them to the linux-acpi\n\t * mailinglist for comments. Merging is to be done only\n\t * through acpi-test and the ACPI maintainer.\n\t *\n\t * If the above is too much to ask, don't change the keymap.\n\t * Ask the thinkpad-acpi maintainer to do it, instead.\n\t */\n\n\tenum keymap_index {\n\t\tTPACPI_KEYMAP_IBM_GENERIC = 0,\n\t\tTPACPI_KEYMAP_LENOVO_GENERIC,\n\t};\n\n\tstatic const tpacpi_keymap_t tpacpi_keymaps[] __initconst = {\n\t/* Generic keymap for IBM ThinkPads */\n\t[TPACPI_KEYMAP_IBM_GENERIC] = {\n\t\t/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */\n\t\tKEY_FN_F1,\tKEY_BATTERY,\tKEY_COFFEE,\tKEY_SLEEP,\n\t\tKEY_WLAN,\tKEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,\n\t\tKEY_FN_F9,\tKEY_FN_F10,\tKEY_FN_F11,\tKEY_SUSPEND,\n\n\t\t/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */\n\t\tKEY_UNKNOWN,\t/* 0x0C: FN+BACKSPACE */\n\t\tKEY_UNKNOWN,\t/* 0x0D: FN+INSERT */\n\t\tKEY_UNKNOWN,\t/* 0x0E: FN+DELETE */\n\n\t\t/* brightness: firmware always reacts to them */\n\t\tKEY_RESERVED,\t/* 0x0F: FN+HOME (brightness up) */\n\t\tKEY_RESERVED,\t/* 0x10: FN+END (brightness down) */\n\n\t\t/* Thinklight: firmware always react to it */\n\t\tKEY_RESERVED,\t/* 0x11: FN+PGUP (thinklight toggle) */\n\n\t\tKEY_UNKNOWN,\t/* 0x12: FN+PGDOWN */\n\t\tKEY_ZOOM,\t/* 0x13: FN+SPACE (zoom) */\n\n\t\t/* Volume: firmware always react to it and reprograms\n\t\t * the built-in *extra* mixer. Never map it to control\n\t\t * another mixer by default. 
*/\n\t\tKEY_RESERVED,\t/* 0x14: VOLUME UP */\n\t\tKEY_RESERVED,\t/* 0x15: VOLUME DOWN */\n\t\tKEY_RESERVED,\t/* 0x16: MUTE */\n\n\t\tKEY_VENDOR,\t/* 0x17: Thinkpad/AccessIBM/Lenovo */\n\n\t\t/* (assignments unknown, please report if found) */\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\n\t\t/* No assignments, only used for Adaptive keyboards. */\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\n\t\t/* No assignment, used for newer Lenovo models */\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN\n\n\t\t},\n\n\t/* Generic keymap for Lenovo ThinkPads */\n\t[TPACPI_KEYMAP_LENOVO_GENERIC] = {\n\t\t/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */\n\t\tKEY_FN_F1,\tKEY_COFFEE,\tKEY_BATTERY,\tKEY_SLEEP,\n\t\tKEY_WLAN,\tKEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8,\n\t\tKEY_FN_F9,\tKEY_FN_F10,\tKEY_FN_F11,\tKEY_SUSPEND,\n\n\t\t/* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */\n\t\tKEY_UNKNOWN,\t/* 0x0C: FN+BACKSPACE */\n\t\tKEY_UNKNOWN,\t/* 0x0D: FN+INSERT */\n\t\tKEY_UNKNOWN,\t/* 0x0E: FN+DELETE */\n\n\t\t/* These should be enabled --only-- when ACPI video\n\t\t * is disabled (i.e. 
in \"vendor\" mode), and are handled\n\t\t * in a special way by the init code */\n\t\tKEY_BRIGHTNESSUP,\t/* 0x0F: FN+HOME (brightness up) */\n\t\tKEY_BRIGHTNESSDOWN,\t/* 0x10: FN+END (brightness down) */\n\n\t\tKEY_RESERVED,\t/* 0x11: FN+PGUP (thinklight toggle) */\n\n\t\tKEY_UNKNOWN,\t/* 0x12: FN+PGDOWN */\n\t\tKEY_ZOOM,\t/* 0x13: FN+SPACE (zoom) */\n\n\t\t/* Volume: z60/z61, T60 (BIOS version?): firmware always\n\t\t * react to it and reprograms the built-in *extra* mixer.\n\t\t * Never map it to control another mixer by default.\n\t\t *\n\t\t * T60?, T61, R60?, R61: firmware and EC tries to send\n\t\t * these over the regular keyboard, so these are no-ops,\n\t\t * but there are still weird bugs re. MUTE, so do not\n\t\t * change unless you get test reports from all Lenovo\n\t\t * models. May cause the BIOS to interfere with the\n\t\t * HDA mixer.\n\t\t */\n\t\tKEY_RESERVED,\t/* 0x14: VOLUME UP */\n\t\tKEY_RESERVED,\t/* 0x15: VOLUME DOWN */\n\t\tKEY_RESERVED,\t/* 0x16: MUTE */\n\n\t\tKEY_VENDOR,\t/* 0x17: Thinkpad/AccessIBM/Lenovo */\n\n\t\t/* (assignments unknown, please report if found) */\n\t\tKEY_UNKNOWN, KEY_UNKNOWN,\n\n\t\t/*\n\t\t * The mic mute button only sends 0x1a. 
It does not\n\t\t * automatically mute the mic or change the mute light.\n\t\t */\n\t\tKEY_MICMUTE,\t/* 0x1a: Mic mute (since ?400 or so) */\n\n\t\t/* (assignments unknown, please report if found) */\n\t\tKEY_UNKNOWN,\n\n\t\t/* Extra keys in use since the X240 / T440 / T540 */\n\t\tKEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_FILE,\n\n\t\t/*\n\t\t * These are the adaptive keyboard keycodes for Carbon X1 2014.\n\t\t * The first item in this list is the Mute button which is\n\t\t * emitted with 0x103 through\n\t\t * adaptive_keyboard_hotkey_notify_hotkey() when the sound\n\t\t * symbol is held.\n\t\t * We'll need to offset those by 0x20.\n\t\t */\n\t\tKEY_RESERVED, /* Mute held, 0x103 */\n\t\tKEY_BRIGHTNESS_MIN, /* Backlight off */\n\t\tKEY_RESERVED, /* Clipping tool */\n\t\tKEY_RESERVED, /* Cloud */\n\t\tKEY_RESERVED,\n\t\tKEY_VOICECOMMAND, /* Voice */\n\t\tKEY_RESERVED,\n\t\tKEY_RESERVED, /* Gestures */\n\t\tKEY_RESERVED,\n\t\tKEY_RESERVED,\n\t\tKEY_RESERVED,\n\t\tKEY_CONFIG, /* Settings */\n\t\tKEY_RESERVED, /* New tab */\n\t\tKEY_REFRESH, /* Reload */\n\t\tKEY_BACK, /* Back */\n\t\tKEY_RESERVED, /* Microphone down */\n\t\tKEY_RESERVED, /* Microphone up */\n\t\tKEY_RESERVED, /* Microphone cancellation */\n\t\tKEY_RESERVED, /* Camera mode */\n\t\tKEY_RESERVED, /* Rotate display, 0x116 */\n\n\t\t/*\n\t\t * These are found in 2017 models (e.g. 
T470s, X270).\n\t\t * The lowest known value is 0x311, which according to\n\t\t * the manual should launch a user defined favorite\n\t\t * application.\n\t\t *\n\t\t * The offset for these is TP_ACPI_HOTKEYSCAN_EXTENDED_START,\n\t\t * corresponding to 0x34.\n\t\t */\n\n\t\t/* (assignments unknown, please report if found) */\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,\n\t\tKEY_UNKNOWN,\n\n\t\tKEY_BOOKMARKS,\t\t\t/* Favorite app, 0x311 */\n\t\tKEY_SELECTIVE_SCREENSHOT,\t/* Clipping tool */\n\t\tKEY_CALC,\t\t\t/* Calculator (above numpad, P52) */\n\t\tKEY_BLUETOOTH,\t\t\t/* Bluetooth */\n\t\tKEY_KEYBOARD,\t\t\t/* Keyboard, 0x315 */\n\t\tKEY_FN_RIGHT_SHIFT,\t\t/* Fn + right Shift */\n\t\tKEY_NOTIFICATION_CENTER,\t/* Notification Center */\n\t\tKEY_PICKUP_PHONE,\t\t/* Answer incoming call */\n\t\tKEY_HANGUP_PHONE,\t\t/* Decline incoming call */\n\t\t},\n\t};\n\n\tstatic const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = {\n\t\t/* Generic maps (fallback) */\n\t\t{\n\t\t .vendor = PCI_VENDOR_ID_IBM,\n\t\t .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,\n\t\t .quirks = TPACPI_KEYMAP_IBM_GENERIC,\n\t\t},\n\t\t{\n\t\t .vendor = PCI_VENDOR_ID_LENOVO,\n\t\t .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,\n\t\t .quirks = TPACPI_KEYMAP_LENOVO_GENERIC,\n\t\t},\n\t};\n\n#define TPACPI_HOTKEY_MAP_SIZE\t\tsizeof(tpacpi_keymap_t)\n#define TPACPI_HOTKEY_MAP_TYPESIZE\tsizeof(tpacpi_keymap_entry_t)\n\n\tint res, i;\n\tint status;\n\tint hkeyv;\n\tbool radiosw_state = false;\n\tbool tabletsw_state = false;\n\n\tunsigned long quirks;\n\tunsigned long keymap_id;\n\n\tvdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t\t\"initializing hotkey subdriver\\n\");\n\n\tBUG_ON(!tpacpi_inputdev);\n\tBUG_ON(tpacpi_inputdev->open != NULL ||\n\t tpacpi_inputdev->close != 
NULL);\n\n\tTPACPI_ACPIHANDLE_INIT(hkey);\n\tmutex_init(&hotkey_mutex);\n\n#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL\n\tmutex_init(&hotkey_thread_data_mutex);\n#endif\n\n\t/* hotkey not supported on 570 */\n\ttp_features.hotkey = hkey_handle != NULL;\n\n\tvdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t\"hotkeys are %s\\n\",\n\t\tstr_supported(tp_features.hotkey));\n\n\tif (!tp_features.hotkey)\n\t\treturn -ENODEV;\n\n\tquirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,\n\t\t\t\t ARRAY_SIZE(tpacpi_hotkey_qtable));\n\n\ttpacpi_disable_brightness_delay();\n\n\t/* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,\n\t A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking\n\t for HKEY interface version 0x100 */\n\tif (acpi_evalf(hkey_handle, &hkeyv, \"MHKV\", \"qd\")) {\n\t\tvdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t\t \"firmware HKEY interface version: 0x%x\\n\",\n\t\t\t hkeyv);\n\n\t\tswitch (hkeyv >> 8) {\n\t\tcase 1:\n\t\t\t/*\n\t\t\t * MHKV 0x100 in A31, R40, R40e,\n\t\t\t * T4x, X31, and later\n\t\t\t */\n\n\t\t\t/* Paranoia check AND init hotkey_all_mask */\n\t\t\tif (!acpi_evalf(hkey_handle, &hotkey_all_mask,\n\t\t\t\t\t\"MHKA\", \"qd\")) {\n\t\t\t\tpr_err(\"missing MHKA handler, please report this to %s\\n\",\n\t\t\t\t TPACPI_MAIL);\n\t\t\t\t/* Fallback: pre-init for FN+F3,F4,F12 */\n\t\t\t\thotkey_all_mask = 0x080cU;\n\t\t\t} else {\n\t\t\t\ttp_features.hotkey_mask = 1;\n\t\t\t}\n\t\t\tbreak;\n\n\t\tcase 2:\n\t\t\t/*\n\t\t\t * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)\n\t\t\t */\n\n\t\t\t/* Paranoia check AND init hotkey_all_mask */\n\t\t\tif (!acpi_evalf(hkey_handle, &hotkey_all_mask,\n\t\t\t\t\t\"MHKA\", \"dd\", 1)) {\n\t\t\t\tpr_err(\"missing MHKA handler, please report this to %s\\n\",\n\t\t\t\t TPACPI_MAIL);\n\t\t\t\t/* Fallback: pre-init for FN+F3,F4,F12 */\n\t\t\t\thotkey_all_mask = 0x080cU;\n\t\t\t} else {\n\t\t\t\ttp_features.hotkey_mask = 1;\n\t\t\t}\n\n\t\t\t/*\n\t\t\t * Check if we have an adaptive keyboard, like 
on the\n\t\t\t * Lenovo Carbon X1 2014 (2nd Gen).\n\t\t\t */\n\t\t\tif (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,\n\t\t\t\t \"MHKA\", \"dd\", 2)) {\n\t\t\t\tif (hotkey_adaptive_all_mask != 0)\n\t\t\t\t\ttp_features.has_adaptive_kbd = true;\n\t\t\t} else {\n\t\t\t\ttp_features.has_adaptive_kbd = false;\n\t\t\t\thotkey_adaptive_all_mask = 0x0U;\n\t\t\t}\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\tpr_err(\"unknown version of the HKEY interface: 0x%x\\n\",\n\t\t\t hkeyv);\n\t\t\tpr_err(\"please report this to %s\\n\", TPACPI_MAIL);\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tvdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t\"hotkey masks are %s\\n\",\n\t\tstr_supported(tp_features.hotkey_mask));\n\n\t/* Init hotkey_all_mask if not initialized yet */\n\tif (!tp_features.hotkey_mask && !hotkey_all_mask &&\n\t (quirks & TPACPI_HK_Q_INIMASK))\n\t\thotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */\n\n\t/* Init hotkey_acpi_mask and hotkey_orig_mask */\n\tif (tp_features.hotkey_mask) {\n\t\t/* hotkey_source_mask *must* be zero for\n\t\t * the first hotkey_mask_get to return hotkey_orig_mask */\n\t\tres = hotkey_mask_get();\n\t\tif (res)\n\t\t\treturn res;\n\n\t\thotkey_orig_mask = hotkey_acpi_mask;\n\t} else {\n\t\thotkey_orig_mask = hotkey_all_mask;\n\t\thotkey_acpi_mask = hotkey_all_mask;\n\t}\n\n#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES\n\tif (dbg_wlswemul) {\n\t\ttp_features.hotkey_wlsw = 1;\n\t\tradiosw_state = !!tpacpi_wlsw_emulstate;\n\t\tpr_info(\"radio switch emulation enabled\\n\");\n\t} else\n#endif\n\t/* Not all thinkpads have a hardware radio switch */\n\tif (acpi_evalf(hkey_handle, &status, \"WLSW\", \"qd\")) {\n\t\ttp_features.hotkey_wlsw = 1;\n\t\tradiosw_state = !!status;\n\t\tpr_info(\"radio switch found; radios are %s\\n\",\n\t\t\tenabled(status, 0));\n\t}\n\n\ttabletsw_state = hotkey_init_tablet_mode();\n\n\t/* Set up key map */\n\tkeymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,\n\t\t\t\t\tARRAY_SIZE(tpacpi_keymap_qtable));\n\tBUG_ON(keymap_id >= 
ARRAY_SIZE(tpacpi_keymaps));\n\tdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t \"using keymap number %lu\\n\", keymap_id);\n\n\thotkey_keycode_map = kmemdup(&tpacpi_keymaps[keymap_id],\n\t\t\tTPACPI_HOTKEY_MAP_SIZE,\tGFP_KERNEL);\n\tif (!hotkey_keycode_map) {\n\t\tpr_err(\"failed to allocate memory for key map\\n\");\n\t\treturn -ENOMEM;\n\t}\n\n\tinput_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);\n\ttpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;\n\ttpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;\n\ttpacpi_inputdev->keycode = hotkey_keycode_map;\n\tfor (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {\n\t\tif (hotkey_keycode_map[i] != KEY_RESERVED) {\n\t\t\tinput_set_capability(tpacpi_inputdev, EV_KEY,\n\t\t\t\t\t\thotkey_keycode_map[i]);\n\t\t} else {\n\t\t\tif (i < sizeof(hotkey_reserved_mask)*8)\n\t\t\t\thotkey_reserved_mask |= 1 << i;\n\t\t}\n\t}\n\n\tif (tp_features.hotkey_wlsw) {\n\t\tinput_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);\n\t\tinput_report_switch(tpacpi_inputdev,\n\t\t\t\t SW_RFKILL_ALL, radiosw_state);\n\t}\n\tif (tp_features.hotkey_tablet) {\n\t\tinput_set_capability(tpacpi_inputdev, EV_SW, SW_TABLET_MODE);\n\t\tinput_report_switch(tpacpi_inputdev,\n\t\t\t\t SW_TABLET_MODE, tabletsw_state);\n\t}\n\n\t/* Do not issue duplicate brightness change events to\n\t * userspace. 
tpacpi_detect_brightness_capabilities() must have\n\t * been called before this point */\n\tif (acpi_video_get_backlight_type() != acpi_backlight_vendor) {\n\t\tpr_info(\"This ThinkPad has standard ACPI backlight brightness control, supported by the ACPI video driver\\n\");\n\t\tpr_notice(\"Disabling thinkpad-acpi brightness events by default...\\n\");\n\n\t\t/* Disable brightness up/down on Lenovo thinkpads when\n\t\t * ACPI is handling them, otherwise it is plain impossible\n\t\t * for userspace to do something even remotely sane */\n\t\thotkey_reserved_mask |=\n\t\t\t(1 << TP_ACPI_HOTKEYSCAN_FNHOME)\n\t\t\t| (1 << TP_ACPI_HOTKEYSCAN_FNEND);\n\t\thotkey_unmap(TP_ACPI_HOTKEYSCAN_FNHOME);\n\t\thotkey_unmap(TP_ACPI_HOTKEYSCAN_FNEND);\n\t}\n\n#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL\n\thotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK\n\t\t\t\t& ~hotkey_all_mask\n\t\t\t\t& ~hotkey_reserved_mask;\n\n\tvdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t \"hotkey source mask 0x%08x, polling freq %u\\n\",\n\t\t hotkey_source_mask, hotkey_poll_freq);\n#endif\n\n\tdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t\t\"enabling firmware HKEY event interface...\\n\");\n\tres = hotkey_status_set(true);\n\tif (res) {\n\t\thotkey_exit();\n\t\treturn res;\n\t}\n\tres = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)\n\t\t\t | hotkey_driver_mask)\n\t\t\t & ~hotkey_source_mask);\n\tif (res < 0 && res != -ENXIO) {\n\t\thotkey_exit();\n\t\treturn res;\n\t}\n\thotkey_user_mask = (hotkey_acpi_mask | hotkey_source_mask)\n\t\t\t\t& ~hotkey_reserved_mask;\n\tvdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,\n\t\t\"initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\\n\",\n\t\thotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);\n\n\ttpacpi_inputdev->open = &hotkey_inputdev_open;\n\ttpacpi_inputdev->close = &hotkey_inputdev_close;\n\n\thotkey_poll_setup_safe(true);\n\n\treturn 0;\n}", "static void __simple_estab(bool exprm)\n{\n\tstruct bpf_link *link;\n\tstruct sk_fds 
sk_fds;\n": "static void __simple_estab(bool exprm)\n{\n\tstruct bpf_link *link;\n\tstruct sk_fds sk_fds;\n\n\thdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);\n\tlport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);\n\n\texp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;\n\texp_passive_estab_in.rand = 0xfa;\n\texp_passive_estab_in.max_delack_ms = 11;\n\n\texp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;\n\texp_active_estab_in.rand = 0xce;\n\texp_active_estab_in.max_delack_ms = 22;\n\n\tprepare_out();\n\n\tif (!exprm) {\n\t\tskel->data->test_kind = 0xB9;\n\t\tskel->data->test_magic = 0;\n\t}\n\n\tif (write_sysctl(\"/proc/sys/net/ipv4/tcp_syncookies\", \"1\"))\n\t\treturn;\n\n\tlink = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);\n\tif (!ASSERT_OK_PTR(link, \"attach_cgroup(estab)\"))\n\t\treturn;\n\n\tif (sk_fds_connect(&sk_fds, false)) {\n\t\tbpf_link__destroy(link);\n\t\treturn;\n\t}\n\n\tcheck_hdr_and_close_fds(&sk_fds);\n\tbpf_link__destroy(link);\n}", "static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)\n{\n\t/* Reset */\n\n\t/* after checking the datasheets for SST, MACRONIX and ATMEL": "static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)\n{\n\t/* Reset */\n\n\t/* after checking the datasheets for SST, MACRONIX and ATMEL\n\t * (oh and incidentaly the jedec spec - 3.5.3.3) the reset\n\t * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at\n\t * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips\n\t * as they will ignore the writes and don't care what address\n\t * the F0 is written to */\n\tif (cfi->addr_unlock1) {\n\t\tpr_debug( \"reset unlock called %x %x \\n\",\n\t\t cfi->addr_unlock1,cfi->addr_unlock2);\n\t\tcfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);\n\t\tcfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);\n\t}\n\n\tcfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, 
map, cfi, cfi->device_type, NULL);\n\t/* Some misdesigned Intel chips do not respond for 0xF0 for a reset,\n\t * so ensure we're in read mode. Send both the Intel and the AMD command\n\t * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so\n\t * this should be safe.\n\t */\n\tcfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);\n\t/* FIXME - should have reset delay before continuing */\n}", "static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)\n{\n\tstruct npc_lt_def_cfg defs, *ltdefs;\n\n\tltdefs = &defs;": "static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)\n{\n\tstruct npc_lt_def_cfg defs, *ltdefs;\n\n\tltdefs = &defs;\n\tmemcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));\n\n\t/* Extract PCP and DEI fields from outer VLAN from byte offset\n\t * 2 from the start of LB_PTR (ie TAG).\n\t * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN\n\t * fields are considered when 'Tunnel enable' is set in profile.\n\t */\n\trvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,\n\t\t (2UL << 12) | (ltdefs->ovlan.lid << 8) |\n\t\t (ltdefs->ovlan.ltype_match << 4) |\n\t\t ltdefs->ovlan.ltype_mask);\n\trvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,\n\t\t (2UL << 12) | (ltdefs->ivlan.lid << 8) |\n\t\t (ltdefs->ivlan.ltype_match << 4) |\n\t\t ltdefs->ivlan.ltype_mask);\n\n\t/* DSCP field in outer and tunneled IPv4 packets */\n\trvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,\n\t\t (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |\n\t\t (ltdefs->rx_oip4.ltype_match << 4) |\n\t\t ltdefs->rx_oip4.ltype_mask);\n\trvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,\n\t\t (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |\n\t\t (ltdefs->rx_iip4.ltype_match << 4) |\n\t\t ltdefs->rx_iip4.ltype_mask);\n\n\t/* DSCP field (traffic class) in outer and tunneled IPv6 packets */\n\trvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,\n\t\t (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |\n\t\t 
(ltdefs->rx_oip6.ltype_match << 4) |\n\t\t ltdefs->rx_oip6.ltype_mask);\n\trvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,\n\t\t (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |\n\t\t (ltdefs->rx_iip6.ltype_match << 4) |\n\t\t ltdefs->rx_iip6.ltype_mask);\n}", "static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)\n{\n\tstruct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);\n\tstruct smc_connection *conn = &smc->conn;\n\tstruct smc_link_group *lgr = conn->lgr;": "static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)\n{\n\tstruct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);\n\tstruct smc_connection *conn = &smc->conn;\n\tstruct smc_link_group *lgr = conn->lgr;\n\tstruct list_head *buf_list;\n\tint bufsize, bufsize_short;\n\tbool is_dgraded = false;\n\tstruct mutex *lock;\t/* lock buffer list */\n\tint sk_buf_size;\n\n\tif (is_rmb)\n\t\t/* use socket recv buffer size (w/o overhead) as start value */\n\t\tsk_buf_size = smc->sk.sk_rcvbuf / 2;\n\telse\n\t\t/* use socket send buffer size (w/o overhead) as start value */\n\t\tsk_buf_size = smc->sk.sk_sndbuf / 2;\n\n\tfor (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb);\n\t bufsize_short >= 0; bufsize_short--) {\n\t\tif (is_rmb) {\n\t\t\tlock = &lgr->rmbs_lock;\n\t\t\tbuf_list = &lgr->rmbs[bufsize_short];\n\t\t} else {\n\t\t\tlock = &lgr->sndbufs_lock;\n\t\t\tbuf_list = &lgr->sndbufs[bufsize_short];\n\t\t}\n\t\tbufsize = smc_uncompress_bufsize(bufsize_short);\n\n\t\t/* check for reusable slot in the link group */\n\t\tbuf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);\n\t\tif (buf_desc) {\n\t\t\tSMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);\n\t\t\tSMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);\n\t\t\tbreak; /* found reusable slot */\n\t\t}\n\n\t\tif (is_smcd)\n\t\t\tbuf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);\n\t\telse\n\t\t\tbuf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);\n\n\t\tif (PTR_ERR(buf_desc) == -ENOMEM)\n\t\t\tbreak;\n\t\tif 
(IS_ERR(buf_desc)) {\n\t\t\tif (!is_dgraded) {\n\t\t\t\tis_dgraded = true;\n\t\t\t\tSMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);\n\t\t\t}\n\t\t\tcontinue;\n\t\t}\n\n\t\tSMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);\n\t\tSMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);\n\t\tbuf_desc->used = 1;\n\t\tmutex_lock(lock);\n\t\tlist_add(&buf_desc->list, buf_list);\n\t\tmutex_unlock(lock);\n\t\tbreak; /* found */\n\t}\n\n\tif (IS_ERR(buf_desc))\n\t\treturn PTR_ERR(buf_desc);\n\n\tif (!is_smcd) {\n\t\tif (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {\n\t\t\tsmcr_buf_unuse(buf_desc, lgr);\n\t\t\treturn -ENOMEM;\n\t\t}\n\t}\n\n\tif (is_rmb) {\n\t\tconn->rmb_desc = buf_desc;\n\t\tconn->rmbe_size_short = bufsize_short;\n\t\tsmc->sk.sk_rcvbuf = bufsize * 2;\n\t\tatomic_set(&conn->bytes_to_rcv, 0);\n\t\tconn->rmbe_update_limit =\n\t\t\tsmc_rmb_wnd_update_limit(buf_desc->len);\n\t\tif (is_smcd)\n\t\t\tsmc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */\n\t} else {\n\t\tconn->sndbuf_desc = buf_desc;\n\t\tsmc->sk.sk_sndbuf = bufsize * 2;\n\t\tatomic_set(&conn->sndbuf_space, bufsize);\n\t}\n\treturn 0;\n}", "static void rtl_hw_start_8168ep(struct rtl8169_private *tp)\n{\n\trtl8168ep_stop_cmac(tp);\n\n\trtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);": "static void rtl_hw_start_8168ep(struct rtl8169_private *tp)\n{\n\trtl8168ep_stop_cmac(tp);\n\n\trtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);\n\trtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);\n\n\trtl_set_def_aspm_entry_latency(tp);\n\n\trtl_reset_packet_filter(tp);\n\n\trtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);\n\n\tRTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);\n\n\trtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);\n\trtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);\n\n\trtl8168_config_eee_mac(tp);\n\n\trtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);\n\n\tRTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);\n\n\trtl_pcie_state_l2l3_disable(tp);\n}", "static int video_start(struct ia_css_pipe *pipe)\n{\n\tint err = 
0;\n\tstruct ia_css_pipe *copy_pipe, *capture_pipe;\n\tenum sh_css_pipe_config_override copy_ovrd;": "static int video_start(struct ia_css_pipe *pipe)\n{\n\tint err = 0;\n\tstruct ia_css_pipe *copy_pipe, *capture_pipe;\n\tenum sh_css_pipe_config_override copy_ovrd;\n\tenum ia_css_input_mode video_pipe_input_mode;\n\tunsigned int thread_id;\n\n\tIA_CSS_ENTER_PRIVATE(\"pipe = %p\", pipe);\n\tif ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {\n\t\tIA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);\n\t\treturn -EINVAL;\n\t}\n\n\tvideo_pipe_input_mode = pipe->stream->config.mode;\n\n\tcopy_pipe = pipe->pipe_settings.video.copy_pipe;\n\tcapture_pipe = pipe->pipe_settings.video.capture_pipe;\n\n\tsh_css_metrics_start_frame();\n\n\t/* multi stream video needs mipi buffers */\n\n\terr = send_mipi_frames(pipe);\n\tif (err)\n\t\treturn err;\n\n\tsend_raw_frames(pipe);\n\n\tia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);\n\tcopy_ovrd = 1 << thread_id;\n\n\tif (pipe->stream->cont_capt) {\n\t\tia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),\n\t\t\t\t\t\t &thread_id);\n\t\tcopy_ovrd |= 1 << thread_id;\n\t}\n\n\t/* Construct and load the copy pipe */\n\tif (pipe->stream->config.continuous) {\n\t\tsh_css_sp_init_pipeline(©_pipe->pipeline,\n\t\t\t\t\tIA_CSS_PIPE_ID_COPY,\n\t\t\t\t\t(uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),\n\t\t\t\t\tfalse,\n\t\t\t\t\tpipe->stream->config.pixels_per_clock == 2, false,\n\t\t\t\t\tfalse, pipe->required_bds_factor,\n\t\t\t\t\tcopy_ovrd,\n\t\t\t\t\tpipe->stream->config.mode,\n\t\t\t\t\t&pipe->stream->config.metadata_config,\n\t\t\t\t\t&pipe->stream->info.metadata_info,\n\t\t\t\t\tpipe->stream->config.source.port.port);\n\n\t\t/*\n\t\t * make the video pipe start with mem mode input, copy handles\n\t\t * the actual mode\n\t\t */\n\t\tvideo_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;\n\t}\n\n\t/* Construct and load the capture pipe */\n\tif (pipe->stream->cont_capt) 
{\n\t\tsh_css_sp_init_pipeline(&capture_pipe->pipeline,\n\t\t\t\t\tIA_CSS_PIPE_ID_CAPTURE,\n\t\t\t\t\t(uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),\n\t\t\t\t\tcapture_pipe->config.default_capture_config.enable_xnr != 0,\n\t\t\t\t\tcapture_pipe->stream->config.pixels_per_clock == 2,\n\t\t\t\t\ttrue, /* continuous */\n\t\t\t\t\tfalse, /* offline */\n\t\t\t\t\tcapture_pipe->required_bds_factor,\n\t\t\t\t\t0,\n\t\t\t\t\tIA_CSS_INPUT_MODE_MEMORY,\n\t\t\t\t\t&pipe->stream->config.metadata_config,\n\t\t\t\t\t&pipe->stream->info.metadata_info,\n\t\t\t\t\t(enum mipi_port_id)0);\n\t}\n\n\tstart_pipe(pipe, copy_ovrd, video_pipe_input_mode);\n\n\tIA_CSS_LEAVE_ERR_PRIVATE(err);\n\treturn err;\n}", "static int cs42l42_manual_hs_det(struct sub_codec *cs42l42)\n{\n\tunsigned int hs_det_status;\n\tunsigned int hs_det_comp1;\n\tunsigned int hs_det_comp2;": "static int cs42l42_manual_hs_det(struct sub_codec *cs42l42)\n{\n\tunsigned int hs_det_status;\n\tunsigned int hs_det_comp1;\n\tunsigned int hs_det_comp2;\n\tunsigned int hs_det_sw;\n\tunsigned int hs_type;\n\n\t/* Set hs detect to manual, active mode */\n\tcs8409_i2c_write(cs42l42, CS42L42_HSDET_CTL2,\n\t\t\t (1 << CS42L42_HSDET_CTRL_SHIFT) |\n\t\t\t (0 << CS42L42_HSDET_SET_SHIFT) |\n\t\t\t (0 << CS42L42_HSBIAS_REF_SHIFT) |\n\t\t\t (0 << CS42L42_HSDET_AUTO_TIME_SHIFT));\n\n\t/* Configure HS DET comparator reference levels. */\n\tcs8409_i2c_write(cs42l42, CS42L42_HSDET_CTL1,\n\t\t\t (CS42L42_HSDET_COMP1_LVL_VAL << CS42L42_HSDET_COMP1_LVL_SHIFT) |\n\t\t\t (CS42L42_HSDET_COMP2_LVL_VAL << CS42L42_HSDET_COMP2_LVL_SHIFT));\n\n\t/* Open the SW_HSB_HS3 switch and close SW_HSB_HS4 for a Type 1 headset. 
*/\n\tcs8409_i2c_write(cs42l42, CS42L42_HS_SWITCH_CTL, CS42L42_HSDET_SW_COMP1);\n\n\tmsleep(100);\n\n\ths_det_status = cs8409_i2c_read(cs42l42, CS42L42_HS_DET_STATUS);\n\n\ths_det_comp1 = (hs_det_status & CS42L42_HSDET_COMP1_OUT_MASK) >>\n\t\t\tCS42L42_HSDET_COMP1_OUT_SHIFT;\n\ths_det_comp2 = (hs_det_status & CS42L42_HSDET_COMP2_OUT_MASK) >>\n\t\t\tCS42L42_HSDET_COMP2_OUT_SHIFT;\n\n\t/* Close the SW_HSB_HS3 switch for a Type 2 headset. */\n\tcs8409_i2c_write(cs42l42, CS42L42_HS_SWITCH_CTL, CS42L42_HSDET_SW_COMP2);\n\n\tmsleep(100);\n\n\ths_det_status = cs8409_i2c_read(cs42l42, CS42L42_HS_DET_STATUS);\n\n\ths_det_comp1 |= ((hs_det_status & CS42L42_HSDET_COMP1_OUT_MASK) >>\n\t\t\tCS42L42_HSDET_COMP1_OUT_SHIFT) << 1;\n\ths_det_comp2 |= ((hs_det_status & CS42L42_HSDET_COMP2_OUT_MASK) >>\n\t\t\tCS42L42_HSDET_COMP2_OUT_SHIFT) << 1;\n\n\t/* Use Comparator 1 with 1.25V Threshold. */\n\tswitch (hs_det_comp1) {\n\tcase CS42L42_HSDET_COMP_TYPE1:\n\t\ths_type = CS42L42_PLUG_CTIA;\n\t\ths_det_sw = CS42L42_HSDET_SW_TYPE1;\n\t\tbreak;\n\tcase CS42L42_HSDET_COMP_TYPE2:\n\t\ths_type = CS42L42_PLUG_OMTP;\n\t\ths_det_sw = CS42L42_HSDET_SW_TYPE2;\n\t\tbreak;\n\tdefault:\n\t\t/* Fallback to Comparator 2 with 1.75V Threshold. 
*/\n\t\tswitch (hs_det_comp2) {\n\t\tcase CS42L42_HSDET_COMP_TYPE1:\n\t\t\ths_type = CS42L42_PLUG_CTIA;\n\t\t\ths_det_sw = CS42L42_HSDET_SW_TYPE1;\n\t\t\tbreak;\n\t\tcase CS42L42_HSDET_COMP_TYPE2:\n\t\t\ths_type = CS42L42_PLUG_OMTP;\n\t\t\ths_det_sw = CS42L42_HSDET_SW_TYPE2;\n\t\t\tbreak;\n\t\tcase CS42L42_HSDET_COMP_TYPE3:\n\t\t\ths_type = CS42L42_PLUG_HEADPHONE;\n\t\t\ths_det_sw = CS42L42_HSDET_SW_TYPE3;\n\t\t\tbreak;\n\t\tdefault:\n\t\t\ths_type = CS42L42_PLUG_INVALID;\n\t\t\ths_det_sw = CS42L42_HSDET_SW_TYPE4;\n\t\t\tbreak;\n\t\t}\n\t}\n\n\t/* Set Switches */\n\tcs8409_i2c_write(cs42l42, CS42L42_HS_SWITCH_CTL, hs_det_sw);\n\n\t/* Set HSDET mode to Manual\u2014Disabled */\n\tcs8409_i2c_write(cs42l42, CS42L42_HSDET_CTL2,\n\t\t\t (0 << CS42L42_HSDET_CTRL_SHIFT) |\n\t\t\t (0 << CS42L42_HSDET_SET_SHIFT) |\n\t\t\t (0 << CS42L42_HSBIAS_REF_SHIFT) |\n\t\t\t (0 << CS42L42_HSDET_AUTO_TIME_SHIFT));\n\n\t/* Configure HS DET comparator reference levels. */\n\tcs8409_i2c_write(cs42l42, CS42L42_HSDET_CTL1,\n\t\t\t (CS42L42_HSDET_COMP1_LVL_DEFAULT << CS42L42_HSDET_COMP1_LVL_SHIFT) |\n\t\t\t (CS42L42_HSDET_COMP2_LVL_DEFAULT << CS42L42_HSDET_COMP2_LVL_SHIFT));\n\n\treturn hs_type;\n}", "static void service_done_flag(struct dim2_hdm *dev, int ch_idx)\n{\n\tstruct hdm_channel *hdm_ch = dev->hch + ch_idx;\n\tstruct dim_ch_state_t st;\n\tstruct list_head *head;": "static void service_done_flag(struct dim2_hdm *dev, int ch_idx)\n{\n\tstruct hdm_channel *hdm_ch = dev->hch + ch_idx;\n\tstruct dim_ch_state_t st;\n\tstruct list_head *head;\n\tstruct mbo *mbo;\n\tint done_buffers;\n\tunsigned long flags;\n\tu8 *data;\n\n\tBUG_ON(!hdm_ch);\n\tBUG_ON(!hdm_ch->is_initialized);\n\n\tspin_lock_irqsave(&dim_lock, flags);\n\n\tdone_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;\n\tif (!done_buffers) {\n\t\tspin_unlock_irqrestore(&dim_lock, flags);\n\t\treturn;\n\t}\n\n\tif (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {\n\t\tspin_unlock_irqrestore(&dim_lock, 
flags);\n\t\treturn;\n\t}\n\tspin_unlock_irqrestore(&dim_lock, flags);\n\n\thead = &hdm_ch->started_list;\n\n\twhile (done_buffers) {\n\t\tspin_lock_irqsave(&dim_lock, flags);\n\t\tif (list_empty(head)) {\n\t\t\tspin_unlock_irqrestore(&dim_lock, flags);\n\t\t\tpr_crit(\"hard error: started_mbo list is empty whereas DIM2 has sent buffers\\n\");\n\t\t\tbreak;\n\t\t}\n\n\t\tmbo = list_first_entry(head, struct mbo, list);\n\t\tlist_del(head->next);\n\t\tspin_unlock_irqrestore(&dim_lock, flags);\n\n\t\tdata = mbo->virt_address;\n\n\t\tif (hdm_ch->data_type == MOST_CH_ASYNC &&\n\t\t hdm_ch->direction == MOST_CH_RX &&\n\t\t PACKET_IS_NET_INFO(data)) {\n\t\t\tretrieve_netinfo(dev, mbo);\n\n\t\t\tspin_lock_irqsave(&dim_lock, flags);\n\t\t\tlist_add_tail(&mbo->list, &hdm_ch->pending_list);\n\t\t\tspin_unlock_irqrestore(&dim_lock, flags);\n\t\t} else {\n\t\t\tif (hdm_ch->data_type == MOST_CH_CONTROL ||\n\t\t\t hdm_ch->data_type == MOST_CH_ASYNC) {\n\t\t\t\tu32 const data_size =\n\t\t\t\t\t(u32)data[0] * 256 + data[1] + 2;\n\n\t\t\t\tmbo->processed_length =\n\t\t\t\t\tmin_t(u32, data_size,\n\t\t\t\t\t mbo->buffer_length);\n\t\t\t} else {\n\t\t\t\tmbo->processed_length = mbo->buffer_length;\n\t\t\t}\n\t\t\tmbo->status = MBO_SUCCESS;\n\t\t\tmbo->complete(mbo);\n\t\t}\n\n\t\tdone_buffers--;\n\t}\n}", "static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)\n{\n\tbool retValue = false;\n\n#ifdef TODO": "static bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)\n{\n\tbool retValue = false;\n\n#ifdef TODO\n\t// Apply for 819u only\n#if (HAL_CODE_BASE == RTL8192)\n\n#if (DEV_BUS_TYPE == USB_INTERFACE)\n\t// Alway disable MCS15 by Jerry Chang's request.by Emily, 2008.04.15\n\tretValue = true;\n#elif (DEV_BUS_TYPE == PCI_INTERFACE)\n\t// Enable MCS15 if the peer is Cisco AP. 
by Emily, 2008.05.12\n//\tif(pBssDesc->bCiscoCapExist)\n//\t\tretValue = false;\n//\telse\n\t\tretValue = false;\n#endif\n#endif\n#endif\n\t// Jerry Chang suggest that 8190 1x2 does not need to disable MCS15\n\n\treturn retValue;\n}", "static void rtl_enable_exit_l1(struct rtl8169_private *tp)\n{\n\t/* Bits control which events trigger ASPM L1 exit:\n\t * Bit 12: rxdv\n\t * Bit 11: ltr_msg": "static void rtl_enable_exit_l1(struct rtl8169_private *tp)\n{\n\t/* Bits control which events trigger ASPM L1 exit:\n\t * Bit 12: rxdv\n\t * Bit 11: ltr_msg\n\t * Bit 10: txdma_poll\n\t * Bit 9: xadm\n\t * Bit 8: pktavi\n\t * Bit 7: txpla\n\t */\n\tswitch (tp->mac_version) {\n\tcase RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:\n\t\trtl_eri_set_bits(tp, 0xd4, 0x1f00);\n\t\tbreak;\n\tcase RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:\n\t\trtl_eri_set_bits(tp, 0xd4, 0x0c00);\n\t\tbreak;\n\tcase RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:\n\t\tr8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);\n\t\tbreak;\n\tdefault:\n\t\tbreak;\n\t}\n}", "static int tls_clone_plaintext_msg(struct sock *sk, int required)\n{\n\tstruct tls_context *tls_ctx = tls_get_ctx(sk);\n\tstruct tls_prot_info *prot = &tls_ctx->prot_info;\n\tstruct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);": "static int tls_clone_plaintext_msg(struct sock *sk, int required)\n{\n\tstruct tls_context *tls_ctx = tls_get_ctx(sk);\n\tstruct tls_prot_info *prot = &tls_ctx->prot_info;\n\tstruct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);\n\tstruct tls_rec *rec = ctx->open_rec;\n\tstruct sk_msg *msg_pl = &rec->msg_plaintext;\n\tstruct sk_msg *msg_en = &rec->msg_encrypted;\n\tint skip, len;\n\n\t/* We add page references worth len bytes from encrypted sg\n\t * at the end of plaintext sg. 
It is guaranteed that msg_en\n\t * has enough required room (ensured by caller).\n\t */\n\tlen = required - msg_pl->sg.size;\n\n\t/* Skip initial bytes in msg_en's data to be able to use\n\t * same offset of both plain and encrypted data.\n\t */\n\tskip = prot->prepend_size + msg_pl->sg.size;\n\n\treturn sk_msg_clone(sk, msg_pl, msg_en, skip, len);\n}", "static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)\n{\n\tstruct prb_desc_ring *desc_ring = &e->rb->desc_ring;\n\tstruct prb_desc *d = to_desc(desc_ring, e->id);\n\tunsigned long prev_state_val = DESC_SV(e->id, desc_reserved);": "static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)\n{\n\tstruct prb_desc_ring *desc_ring = &e->rb->desc_ring;\n\tstruct prb_desc *d = to_desc(desc_ring, e->id);\n\tunsigned long prev_state_val = DESC_SV(e->id, desc_reserved);\n\n\t/* Now the writer has finished all writing: LMM(_prb_commit:A) */\n\n\t/*\n\t * Set the descriptor as committed. See \"ABA Issues\" about why\n\t * cmpxchg() instead of set() is used.\n\t *\n\t * 1 Guarantee all record data is stored before the descriptor state\n\t * is stored as committed. A write memory barrier is sufficient\n\t * for this. This pairs with desc_read:B and desc_reopen_last:A.\n\t *\n\t * 2. Guarantee the descriptor state is stored as committed before\n\t * re-checking the head ID in order to possibly finalize this\n\t * descriptor. This pairs with desc_reserve:D.\n\t *\n\t * Memory barrier involvement:\n\t *\n\t * If prb_commit:A reads from desc_reserve:D, then\n\t * desc_make_final:A reads from _prb_commit:B.\n\t *\n\t * Relies on:\n\t *\n\t * MB _prb_commit:B to prb_commit:A\n\t * matching\n\t * MB desc_reserve:D to desc_make_final:A\n\t */\n\tif (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,\n\t\t\tDESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */\n\t\tWARN_ON_ONCE(1);\n\t}\n\n\t/* Restore interrupts, the reserve/commit window is finished. 
*/\n\tlocal_irq_restore(e->irqflags);\n}", "static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)\n{\n\tstruct ccci_header *ccci_h = (struct ccci_header *)skb->data;\n\tstruct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;\n\tstruct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;": "static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)\n{\n\tstruct ccci_header *ccci_h = (struct ccci_header *)skb->data;\n\tstruct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;\n\tstruct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;\n\tstruct device *dev = queue->md_ctrl->dev;\n\tconst struct t7xx_port_conf *port_conf;\n\tstruct t7xx_port *port;\n\tu16 seq_num, channel;\n\tint ret;\n\n\tchannel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));\n\tif (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {\n\t\tdev_err_ratelimited(dev, \"Packet drop on channel 0x%x, modem not ready\\n\", channel);\n\t\tgoto drop_skb;\n\t}\n\n\tport = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);\n\tif (!port) {\n\t\tdev_err_ratelimited(dev, \"Packet drop on channel 0x%x, port not found\\n\", channel);\n\t\tgoto drop_skb;\n\t}\n\n\tseq_num = t7xx_port_next_rx_seq_num(port, ccci_h);\n\tport_conf = port->port_conf;\n\tskb_pull(skb, sizeof(*ccci_h));\n\n\tret = port_conf->ops->recv_skb(port, skb);\n\t/* Error indicates to try again later */\n\tif (ret) {\n\t\tskb_push(skb, sizeof(*ccci_h));\n\t\treturn ret;\n\t}\n\n\tport->seq_nums[MTK_RX] = seq_num;\n\treturn 0;\n\ndrop_skb:\n\tdev_kfree_skb_any(skb);\n\treturn 0;\n}", "static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)\n{\n\tstruct data_cmd_rsp_rd_sh_mem_ep_data_buffer_done_v2 *rd_done;\n\tstruct data_cmd_rsp_wr_sh_mem_ep_data_buffer_done_v2 *done;\n\tstruct apm_cmd_rsp_shared_mem_map_regions *rsp;": "static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)\n{\n\tstruct data_cmd_rsp_rd_sh_mem_ep_data_buffer_done_v2 *rd_done;\n\tstruct 
data_cmd_rsp_wr_sh_mem_ep_data_buffer_done_v2 *done;\n\tstruct apm_cmd_rsp_shared_mem_map_regions *rsp;\n\tstruct gpr_ibasic_rsp_result_t *result;\n\tstruct q6apm_graph *graph = priv;\n\tstruct gpr_hdr *hdr = &data->hdr;\n\tstruct device *dev = graph->dev;\n\tuint32_t client_event;\n\tphys_addr_t phys;\n\tint token;\n\n\tresult = data->payload;\n\n\tswitch (hdr->opcode) {\n\tcase DATA_CMD_RSP_WR_SH_MEM_EP_DATA_BUFFER_DONE_V2:\n\t\tclient_event = APM_CLIENT_EVENT_DATA_WRITE_DONE;\n\t\tmutex_lock(&graph->lock);\n\t\ttoken = hdr->token & APM_WRITE_TOKEN_MASK;\n\n\t\tdone = data->payload;\n\t\tphys = graph->rx_data.buf[token].phys;\n\t\tmutex_unlock(&graph->lock);\n\n\t\tif (lower_32_bits(phys) == done->buf_addr_lsw &&\n\t\t upper_32_bits(phys) == done->buf_addr_msw) {\n\t\t\tgraph->result.opcode = hdr->opcode;\n\t\t\tgraph->result.status = done->status;\n\t\t\tif (graph->cb)\n\t\t\t\tgraph->cb(client_event, hdr->token, data->payload, graph->priv);\n\t\t} else {\n\t\t\tdev_err(dev, \"WR BUFF Unexpected addr %08x-%08x\\n\", done->buf_addr_lsw,\n\t\t\t\tdone->buf_addr_msw);\n\t\t}\n\n\t\tbreak;\n\tcase APM_CMD_RSP_SHARED_MEM_MAP_REGIONS:\n\t\tgraph->result.opcode = hdr->opcode;\n\t\tgraph->result.status = 0;\n\t\trsp = data->payload;\n\n\t\tif (hdr->token == SNDRV_PCM_STREAM_PLAYBACK)\n\t\t\tgraph->rx_data.mem_map_handle = rsp->mem_map_handle;\n\t\telse\n\t\t\tgraph->tx_data.mem_map_handle = rsp->mem_map_handle;\n\n\t\twake_up(&graph->cmd_wait);\n\t\tbreak;\n\tcase DATA_CMD_RSP_RD_SH_MEM_EP_DATA_BUFFER_V2:\n\t\tclient_event = APM_CLIENT_EVENT_DATA_READ_DONE;\n\t\tmutex_lock(&graph->lock);\n\t\trd_done = data->payload;\n\t\tphys = graph->tx_data.buf[hdr->token].phys;\n\t\tmutex_unlock(&graph->lock);\n\n\t\tif (upper_32_bits(phys) == rd_done->buf_addr_msw &&\n\t\t lower_32_bits(phys) == rd_done->buf_addr_lsw) {\n\t\t\tgraph->result.opcode = hdr->opcode;\n\t\t\tgraph->result.status = rd_done->status;\n\t\t\tif (graph->cb)\n\t\t\t\tgraph->cb(client_event, hdr->token, 
data->payload, graph->priv);\n\t\t} else {\n\t\t\tdev_err(dev, \"RD BUFF Unexpected addr %08x-%08x\\n\", rd_done->buf_addr_lsw,\n\t\t\t\trd_done->buf_addr_msw);\n\t\t}\n\t\tbreak;\n\tcase DATA_CMD_WR_SH_MEM_EP_EOS_RENDERED:\n\t\tbreak;\n\tcase GPR_BASIC_RSP_RESULT:\n\t\tswitch (result->opcode) {\n\t\tcase APM_CMD_SHARED_MEM_UNMAP_REGIONS:\n\t\t\tgraph->result.opcode = result->opcode;\n\t\t\tgraph->result.status = 0;\n\t\t\tif (hdr->token == SNDRV_PCM_STREAM_PLAYBACK)\n\t\t\t\tgraph->rx_data.mem_map_handle = 0;\n\t\t\telse\n\t\t\t\tgraph->tx_data.mem_map_handle = 0;\n\n\t\t\twake_up(&graph->cmd_wait);\n\t\t\tbreak;\n\t\tcase APM_CMD_SHARED_MEM_MAP_REGIONS:\n\t\tcase DATA_CMD_WR_SH_MEM_EP_MEDIA_FORMAT:\n\t\tcase APM_CMD_SET_CFG:\n\t\t\tgraph->result.opcode = result->opcode;\n\t\t\tgraph->result.status = result->status;\n\t\t\tif (result->status)\n\t\t\t\tdev_err(dev, \"Error (%d) Processing 0x%08x cmd\\n\",\n\t\t\t\t\tresult->status, result->opcode);\n\t\t\twake_up(&graph->cmd_wait);\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tbreak;\n\t\t}\n\t\tbreak;\n\tdefault:\n\t\tbreak;\n\t}\n\treturn 0;\n}", "static void rockchip_snd_xfer_sync_reset(struct rk_i2s_tdm_dev *i2s_tdm)\n{\n\t/* This is technically race-y.\n\t *\n\t * In an ideal world, we could atomically assert both resets at the": "static void rockchip_snd_xfer_sync_reset(struct rk_i2s_tdm_dev *i2s_tdm)\n{\n\t/* This is technically race-y.\n\t *\n\t * In an ideal world, we could atomically assert both resets at the\n\t * same time, through an atomic bulk reset API. This API however does\n\t * not exist, so what the downstream vendor code used to do was\n\t * implement half a reset controller here and require the CRU to be\n\t * passed to the driver as a device tree node. 
Violating abstractions\n\t * like that is bad, especially when it influences something like the\n\t * bindings which are supposed to describe the hardware, not whatever\n\t * workarounds the driver needs, so it was dropped.\n\t *\n\t * In practice, asserting the resets one by one appears to work just\n\t * fine for playback. During duplex (playback + capture) operation,\n\t * this might become an issue, but that should be solved by the\n\t * implementation of the aforementioned API, not by shoving a reset\n\t * controller into an audio driver.\n\t */\n\n\treset_control_assert(i2s_tdm->tx_reset);\n\treset_control_assert(i2s_tdm->rx_reset);\n\tudelay(10);\n\treset_control_deassert(i2s_tdm->tx_reset);\n\treset_control_deassert(i2s_tdm->rx_reset);\n\tudelay(10);\n}", "static int nx_wait_for_csb(struct nx_gzip_crb_cpb_t *cmdp)\n{\n\tlong poll = 0;\n\tuint64_t t;\n": "static int nx_wait_for_csb(struct nx_gzip_crb_cpb_t *cmdp)\n{\n\tlong poll = 0;\n\tuint64_t t;\n\n\t/* Save power and let other threads use the h/w. top may show\n\t * 100% but only because OS doesn't know we slowed the this\n\t * h/w thread while polling. We're letting other threads have\n\t * higher throughput on the core.\n\t */\n\tcpu_pri_low();\n\n#define CSB_MAX_POLL 200000000UL\n#define USLEEP_TH 300000UL\n\n\tt = __ppc_get_timebase();\n\n\twhile (getnn(cmdp->crb.csb, csb_v) == 0) {\n\t\t++poll;\n\t\thwsync();\n\n\t\tcpu_pri_low();\n\n\t\t/* usleep(0) takes around 29000 ticks ~60 us.\n\t\t * 300000 is spinning for about 600 us then\n\t\t * start sleeping.\n\t\t */\n\t\tif ((__ppc_get_timebase() - t) > USLEEP_TH) {\n\t\t\tcpu_pri_default();\n\t\t\tusleep(1);\n\t\t}\n\n\t\tif (poll > CSB_MAX_POLL)\n\t\t\tbreak;\n\n\t\t/* Fault address from signal handler */\n\t\tif (nx_fault_storage_address) {\n\t\t\tcpu_pri_default();\n\t\t\treturn -EAGAIN;\n\t\t}\n\n\t}\n\n\tcpu_pri_default();\n\n\t/* hw has updated csb and output buffer */\n\thwsync();\n\n\t/* Check CSB flags. 
*/\n\tif (getnn(cmdp->crb.csb, csb_v) == 0) {\n\t\tfprintf(stderr, \"CSB still not valid after %d polls.\\n\",\n\t\t\t(int) poll);\n\t\tprt_err(\"CSB still not valid after %d polls, giving up.\\n\",\n\t\t\t(int) poll);\n\t\treturn -ETIMEDOUT;\n\t}\n\n\treturn 0;\n}", "static void smc_llc_send_request_add_link(struct smc_link *link)\n{\n\tstruct smc_llc_msg_req_add_link_v2 *llc;\n\tstruct smc_wr_tx_pend_priv *pend;\n\tstruct smc_wr_v2_buf *wr_buf;": "static void smc_llc_send_request_add_link(struct smc_link *link)\n{\n\tstruct smc_llc_msg_req_add_link_v2 *llc;\n\tstruct smc_wr_tx_pend_priv *pend;\n\tstruct smc_wr_v2_buf *wr_buf;\n\tstruct smc_gidlist gidlist;\n\tint rc, len, i;\n\n\tif (!smc_wr_tx_link_hold(link))\n\t\treturn;\n\tif (link->lgr->type == SMC_LGR_SYMMETRIC ||\n\t link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)\n\t\tgoto put_out;\n\n\tsmc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);\n\tif (gidlist.len <= 1)\n\t\tgoto put_out;\n\n\trc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);\n\tif (rc)\n\t\tgoto put_out;\n\tllc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;\n\tmemset(llc, 0, SMC_WR_TX_SIZE);\n\n\tllc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;\n\tfor (i = 0; i < gidlist.len; i++)\n\t\tmemcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));\n\tllc->gid_cnt = gidlist.len;\n\tlen = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));\n\tsmc_llc_init_msg_hdr(&llc->hd, link->lgr, len);\n\trc = smc_wr_tx_v2_send(link, pend, len);\n\tif (!rc)\n\t\t/* set REQ_ADD_LINK flow and wait for response from peer */\n\t\tlink->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;\nput_out:\n\tsmc_wr_tx_link_put(link);\n}", "static void sample_print_help(int mask)\n{\n\tprintf(\"Output format description\\n\\n\"\n\t \"By default, redirect success statistics are disabled, use -s to enable.\\n\"\n\t \"The terse output mode is default, verbose mode can be activated using -v\\n\"": "static void sample_print_help(int 
mask)\n{\n\tprintf(\"Output format description\\n\\n\"\n\t \"By default, redirect success statistics are disabled, use -s to enable.\\n\"\n\t \"The terse output mode is default, verbose mode can be activated using -v\\n\"\n\t \"Use SIGQUIT (Ctrl + \\\\) to switch the mode dynamically at runtime\\n\\n\"\n\t \"Terse mode displays at most the following fields:\\n\"\n\t \" rx/s Number of packets received per second\\n\"\n\t \" redir/s Number of packets successfully redirected per second\\n\"\n\t \" err,drop/s Aggregated count of errors per second (including dropped packets)\\n\"\n\t \" xmit/s Number of packets transmitted on the output device per second\\n\\n\"\n\t \"Output description for verbose mode:\\n\"\n\t \" FIELD DESCRIPTION\\n\");\n\n\tif (mask & SAMPLE_RX_CNT) {\n\t\tprintf(\" receive\\t\\tDisplays the number of packets received & errors encountered\\n\"\n\t\t \" \\t\\t\\tWhenever an error or packet drop occurs, details of per CPU error\\n\"\n\t\t \" \\t\\t\\tand drop statistics will be expanded inline in terse mode.\\n\"\n\t\t \" \\t\\t\\t\\tpkt/s - Packets received per second\\n\"\n\t\t \" \\t\\t\\t\\tdrop/s - Packets dropped per second\\n\"\n\t\t \" \\t\\t\\t\\terror/s - Errors encountered per second\\n\\n\");\n\t}\n\tif (mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) {\n\t\tprintf(\" redirect\\t\\tDisplays the number of packets successfully redirected\\n\"\n\t\t \" \\t\\t\\tErrors encountered are expanded under redirect_err field\\n\"\n\t\t \" \\t\\t\\tNote that passing -s to enable it has a per packet overhead\\n\"\n\t\t \" \\t\\t\\t\\tredir/s - Packets redirected successfully per second\\n\\n\"\n\t\t \" redirect_err\\t\\tDisplays the number of packets that failed redirection\\n\"\n\t\t \" \\t\\t\\tThe errno is expanded under this field with per CPU count\\n\"\n\t\t \" \\t\\t\\tThe recognized errors are:\\n\");\n\n\t\tfor (int i = 2; i < XDP_REDIRECT_ERR_MAX; i++)\n\t\t\tprintf(\"\\t\\t\\t %s: %s\\n\", xdp_redirect_err_names[i],\n\t\t\t 
xdp_redirect_err_help[i - 1]);\n\n\t\tprintf(\" \\n\\t\\t\\t\\terror/s - Packets that failed redirection per second\\n\\n\");\n\t}\n\n\tif (mask & SAMPLE_CPUMAP_ENQUEUE_CNT) {\n\t\tprintf(\" enqueue to cpu N\\tDisplays the number of packets enqueued to bulk queue of CPU N\\n\"\n\t\t \" \\t\\t\\tExpands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N\\n\"\n\t\t \" \\t\\t\\tReceived packets can be associated with the CPU redirect program is enqueuing \\n\"\n\t\t \" \\t\\t\\tpackets to.\\n\"\n\t\t \" \\t\\t\\t\\tpkt/s - Packets enqueued per second from other CPU to CPU N\\n\"\n\t\t \" \\t\\t\\t\\tdrop/s - Packets dropped when trying to enqueue to CPU N\\n\"\n\t\t \" \\t\\t\\t\\tbulk-avg - Average number of packets processed for each event\\n\\n\");\n\t}\n\n\tif (mask & SAMPLE_CPUMAP_KTHREAD_CNT) {\n\t\tprintf(\" kthread\\t\\tDisplays the number of packets processed in CPUMAP kthread for each CPU\\n\"\n\t\t \" \\t\\t\\tPackets consumed from ptr_ring in kthread, and its xdp_stats (after calling \\n\"\n\t\t \" \\t\\t\\tCPUMAP bpf prog) are expanded below this. 
xdp_stats are expanded as a total and\\n\"\n\t\t \" \\t\\t\\tthen per-CPU to associate it to each CPU's pinned CPUMAP kthread.\\n\"\n\t\t \" \\t\\t\\t\\tpkt/s - Packets consumed per second from ptr_ring\\n\"\n\t\t \" \\t\\t\\t\\tdrop/s - Packets dropped per second in kthread\\n\"\n\t\t \" \\t\\t\\t\\tsched - Number of times kthread called schedule()\\n\\n\"\n\t\t \" \\t\\t\\txdp_stats (also expands to per-CPU counts)\\n\"\n\t\t \" \\t\\t\\t\\tpass/s - XDP_PASS count for CPUMAP program execution\\n\"\n\t\t \" \\t\\t\\t\\tdrop/s - XDP_DROP count for CPUMAP program execution\\n\"\n\t\t \" \\t\\t\\t\\tredir/s - XDP_REDIRECT count for CPUMAP program execution\\n\\n\");\n\t}\n\n\tif (mask & SAMPLE_EXCEPTION_CNT) {\n\t\tprintf(\" xdp_exception\\t\\tDisplays xdp_exception tracepoint events\\n\"\n\t\t \" \\t\\t\\tThis can occur due to internal driver errors, unrecognized\\n\"\n\t\t \" \\t\\t\\tXDP actions and due to explicit user trigger by use of XDP_ABORTED\\n\"\n\t\t \" \\t\\t\\tEach action is expanded below this field with its count\\n\"\n\t\t \" \\t\\t\\t\\thit/s - Number of times the tracepoint was hit per second\\n\\n\");\n\t}\n\n\tif (mask & SAMPLE_DEVMAP_XMIT_CNT) {\n\t\tprintf(\" devmap_xmit\\t\\tDisplays devmap_xmit tracepoint events\\n\"\n\t\t \" \\t\\t\\tThis tracepoint is invoked for successful transmissions on output\\n\"\n\t\t \" \\t\\t\\tdevice but these statistics are not available for generic XDP mode,\\n\"\n\t\t \" \\t\\t\\thence they will be omitted from the output when using SKB mode\\n\"\n\t\t \" \\t\\t\\t\\txmit/s - Number of packets that were transmitted per second\\n\"\n\t\t \" \\t\\t\\t\\tdrop/s - Number of packets that failed transmissions per second\\n\"\n\t\t \" \\t\\t\\t\\tdrv_err/s - Number of internal driver errors per second\\n\"\n\t\t \" \\t\\t\\t\\tbulk-avg - Average number of packets processed for each event\\n\\n\");\n\t}\n}", "static void sbz_chipio_startup_data(struct hda_codec *codec)\n{\n\tconst struct chipio_stream_remap_data 
*dsp_out_remap_data;\n\tstruct ca0132_spec *spec = codec->spec;\n": "static void sbz_chipio_startup_data(struct hda_codec *codec)\n{\n\tconst struct chipio_stream_remap_data *dsp_out_remap_data;\n\tstruct ca0132_spec *spec = codec->spec;\n\n\tmutex_lock(&spec->chipio_mutex);\n\tcodec_dbg(codec, \"Startup Data entered, mutex locked and loaded.\\n\");\n\n\t/* Remap DAC0's output ports. */\n\tchipio_remap_stream(codec, &stream_remap_data[0]);\n\n\t/* Remap DSP audio output stream ports. */\n\tswitch (ca0132_quirk(spec)) {\n\tcase QUIRK_SBZ:\n\t\tdsp_out_remap_data = &stream_remap_data[1];\n\t\tbreak;\n\n\tcase QUIRK_ZXR:\n\t\tdsp_out_remap_data = &stream_remap_data[2];\n\t\tbreak;\n\n\tdefault:\n\t\tdsp_out_remap_data = NULL;\n\t\tbreak;\n\t}\n\n\tif (dsp_out_remap_data)\n\t\tchipio_remap_stream(codec, dsp_out_remap_data);\n\n\tcodec_dbg(codec, \"Startup Data exited, mutex released.\\n\");\n\tmutex_unlock(&spec->chipio_mutex);\n}", "static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)\n{\n\tint idx, nr_field, key_off, field_marker, keyoff_marker;\n\tint max_key_off, max_bit_pos, group_member;\n\tstruct nix_rx_flowkey_alg *field;": "static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)\n{\n\tint idx, nr_field, key_off, field_marker, keyoff_marker;\n\tint max_key_off, max_bit_pos, group_member;\n\tstruct nix_rx_flowkey_alg *field;\n\tstruct nix_rx_flowkey_alg tmp;\n\tu32 key_type, valid_key;\n\tint l4_key_offset = 0;\n\n\tif (!alg)\n\t\treturn -EINVAL;\n\n#define FIELDS_PER_ALG 5\n#define MAX_KEY_OFF\t40\n\t/* Clear all fields */\n\tmemset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);\n\n\t/* Each of the 32 possible flow key algorithm definitions should\n\t * fall into above incremental config (except ALG0). 
Otherwise a\n\t * single NPC MCAM entry is not sufficient for supporting RSS.\n\t *\n\t * If a different definition or combination needed then NPC MCAM\n\t * has to be programmed to filter such pkts and it's action should\n\t * point to this definition to calculate flowtag or hash.\n\t *\n\t * The `for loop` goes over _all_ protocol field and the following\n\t * variables depicts the state machine forward progress logic.\n\t *\n\t * keyoff_marker - Enabled when hash byte length needs to be accounted\n\t * in field->key_offset update.\n\t * field_marker - Enabled when a new field needs to be selected.\n\t * group_member - Enabled when protocol is part of a group.\n\t */\n\n\tkeyoff_marker = 0; max_key_off = 0; group_member = 0;\n\tnr_field = 0; key_off = 0; field_marker = 1;\n\tfield = &tmp; max_bit_pos = fls(flow_cfg);\n\tfor (idx = 0;\n\t idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&\n\t key_off < MAX_KEY_OFF; idx++) {\n\t\tkey_type = BIT(idx);\n\t\tvalid_key = flow_cfg & key_type;\n\t\t/* Found a field marker, reset the field values */\n\t\tif (field_marker)\n\t\t\tmemset(&tmp, 0, sizeof(tmp));\n\n\t\tfield_marker = true;\n\t\tkeyoff_marker = true;\n\t\tswitch (key_type) {\n\t\tcase NIX_FLOW_KEY_TYPE_PORT:\n\t\t\tfield->sel_chan = true;\n\t\t\t/* This should be set to 1, when SEL_CHAN is set */\n\t\t\tfield->bytesm1 = 1;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_IPV4_PROTO:\n\t\t\tfield->lid = NPC_LID_LC;\n\t\t\tfield->hdr_offset = 9; /* offset */\n\t\t\tfield->bytesm1 = 0; /* 1 byte */\n\t\t\tfield->ltype_match = NPC_LT_LC_IP;\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_IPV4:\n\t\tcase NIX_FLOW_KEY_TYPE_INNR_IPV4:\n\t\t\tfield->lid = NPC_LID_LC;\n\t\t\tfield->ltype_match = NPC_LT_LC_IP;\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {\n\t\t\t\tfield->lid = NPC_LID_LG;\n\t\t\t\tfield->ltype_match = NPC_LT_LG_TU_IP;\n\t\t\t}\n\t\t\tfield->hdr_offset = 12; /* SIP offset */\n\t\t\tfield->bytesm1 = 7; /* SIP + DIP, 8 bytes 
*/\n\t\t\tfield->ltype_mask = 0xF; /* Match only IPv4 */\n\t\t\tkeyoff_marker = false;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_IPV6:\n\t\tcase NIX_FLOW_KEY_TYPE_INNR_IPV6:\n\t\t\tfield->lid = NPC_LID_LC;\n\t\t\tfield->ltype_match = NPC_LT_LC_IP6;\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {\n\t\t\t\tfield->lid = NPC_LID_LG;\n\t\t\t\tfield->ltype_match = NPC_LT_LG_TU_IP6;\n\t\t\t}\n\t\t\tfield->hdr_offset = 8; /* SIP offset */\n\t\t\tfield->bytesm1 = 31; /* SIP + DIP, 32 bytes */\n\t\t\tfield->ltype_mask = 0xF; /* Match only IPv6 */\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_TCP:\n\t\tcase NIX_FLOW_KEY_TYPE_UDP:\n\t\tcase NIX_FLOW_KEY_TYPE_SCTP:\n\t\tcase NIX_FLOW_KEY_TYPE_INNR_TCP:\n\t\tcase NIX_FLOW_KEY_TYPE_INNR_UDP:\n\t\tcase NIX_FLOW_KEY_TYPE_INNR_SCTP:\n\t\t\tfield->lid = NPC_LID_LD;\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||\n\t\t\t key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||\n\t\t\t key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)\n\t\t\t\tfield->lid = NPC_LID_LH;\n\t\t\tfield->bytesm1 = 3; /* Sport + Dport, 4 bytes */\n\n\t\t\t/* Enum values for NPC_LID_LD and NPC_LID_LG are same,\n\t\t\t * so no need to change the ltype_match, just change\n\t\t\t * the lid for inner protocols\n\t\t\t */\n\t\t\tBUILD_BUG_ON((int)NPC_LT_LD_TCP !=\n\t\t\t\t (int)NPC_LT_LH_TU_TCP);\n\t\t\tBUILD_BUG_ON((int)NPC_LT_LD_UDP !=\n\t\t\t\t (int)NPC_LT_LH_TU_UDP);\n\t\t\tBUILD_BUG_ON((int)NPC_LT_LD_SCTP !=\n\t\t\t\t (int)NPC_LT_LH_TU_SCTP);\n\n\t\t\tif ((key_type == NIX_FLOW_KEY_TYPE_TCP ||\n\t\t\t key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&\n\t\t\t valid_key) {\n\t\t\t\tfield->ltype_match |= NPC_LT_LD_TCP;\n\t\t\t\tgroup_member = true;\n\t\t\t} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||\n\t\t\t\t key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&\n\t\t\t\t valid_key) {\n\t\t\t\tfield->ltype_match |= NPC_LT_LD_UDP;\n\t\t\t\tgroup_member = true;\n\t\t\t} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||\n\t\t\t\t key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&\n\t\t\t\t valid_key) 
{\n\t\t\t\tfield->ltype_match |= NPC_LT_LD_SCTP;\n\t\t\t\tgroup_member = true;\n\t\t\t}\n\t\t\tfield->ltype_mask = ~field->ltype_match;\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_SCTP ||\n\t\t\t key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {\n\t\t\t\t/* Handle the case where any of the group item\n\t\t\t\t * is enabled in the group but not the final one\n\t\t\t\t */\n\t\t\t\tif (group_member) {\n\t\t\t\t\tvalid_key = true;\n\t\t\t\t\tgroup_member = false;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfield_marker = false;\n\t\t\t\tkeyoff_marker = false;\n\t\t\t}\n\n\t\t\t/* TCP/UDP/SCTP and ESP/AH falls at same offset so\n\t\t\t * remember the TCP key offset of 40 byte hash key.\n\t\t\t */\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_TCP)\n\t\t\t\tl4_key_offset = key_off;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_NVGRE:\n\t\t\tfield->lid = NPC_LID_LD;\n\t\t\tfield->hdr_offset = 4; /* VSID offset */\n\t\t\tfield->bytesm1 = 2;\n\t\t\tfield->ltype_match = NPC_LT_LD_NVGRE;\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_VXLAN:\n\t\tcase NIX_FLOW_KEY_TYPE_GENEVE:\n\t\t\tfield->lid = NPC_LID_LE;\n\t\t\tfield->bytesm1 = 2;\n\t\t\tfield->hdr_offset = 4;\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tfield_marker = false;\n\t\t\tkeyoff_marker = false;\n\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {\n\t\t\t\tfield->ltype_match |= NPC_LT_LE_VXLAN;\n\t\t\t\tgroup_member = true;\n\t\t\t}\n\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {\n\t\t\t\tfield->ltype_match |= NPC_LT_LE_GENEVE;\n\t\t\t\tgroup_member = true;\n\t\t\t}\n\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {\n\t\t\t\tif (group_member) {\n\t\t\t\t\tfield->ltype_mask = ~field->ltype_match;\n\t\t\t\t\tfield_marker = true;\n\t\t\t\t\tkeyoff_marker = true;\n\t\t\t\t\tvalid_key = true;\n\t\t\t\t\tgroup_member = false;\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_ETH_DMAC:\n\t\tcase NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:\n\t\t\tfield->lid = 
NPC_LID_LA;\n\t\t\tfield->ltype_match = NPC_LT_LA_ETHER;\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {\n\t\t\t\tfield->lid = NPC_LID_LF;\n\t\t\t\tfield->ltype_match = NPC_LT_LF_TU_ETHER;\n\t\t\t}\n\t\t\tfield->hdr_offset = 0;\n\t\t\tfield->bytesm1 = 5; /* DMAC 6 Byte */\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_IPV6_EXT:\n\t\t\tfield->lid = NPC_LID_LC;\n\t\t\tfield->hdr_offset = 40; /* IPV6 hdr */\n\t\t\tfield->bytesm1 = 0; /* 1 Byte ext hdr*/\n\t\t\tfield->ltype_match = NPC_LT_LC_IP6_EXT;\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_GTPU:\n\t\t\tfield->lid = NPC_LID_LE;\n\t\t\tfield->hdr_offset = 4;\n\t\t\tfield->bytesm1 = 3; /* 4 bytes TID*/\n\t\t\tfield->ltype_match = NPC_LT_LE_GTPU;\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_VLAN:\n\t\t\tfield->lid = NPC_LID_LB;\n\t\t\tfield->hdr_offset = 2; /* Skip TPID (2-bytes) */\n\t\t\tfield->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */\n\t\t\tfield->ltype_match = NPC_LT_LB_CTAG;\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tfield->fn_mask = 1; /* Mask out the first nibble */\n\t\t\tbreak;\n\t\tcase NIX_FLOW_KEY_TYPE_AH:\n\t\tcase NIX_FLOW_KEY_TYPE_ESP:\n\t\t\tfield->hdr_offset = 0;\n\t\t\tfield->bytesm1 = 7; /* SPI + sequence number */\n\t\t\tfield->ltype_mask = 0xF;\n\t\t\tfield->lid = NPC_LID_LE;\n\t\t\tfield->ltype_match = NPC_LT_LE_ESP;\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_AH) {\n\t\t\t\tfield->lid = NPC_LID_LD;\n\t\t\t\tfield->ltype_match = NPC_LT_LD_AH;\n\t\t\t\tfield->hdr_offset = 4;\n\t\t\t\tkeyoff_marker = false;\n\t\t\t}\n\t\t\tbreak;\n\t\t}\n\t\tfield->ena = 1;\n\n\t\t/* Found a valid flow key type */\n\t\tif (valid_key) {\n\t\t\t/* Use the key offset of TCP/UDP/SCTP fields\n\t\t\t * for ESP/AH fields.\n\t\t\t */\n\t\t\tif (key_type == NIX_FLOW_KEY_TYPE_ESP ||\n\t\t\t key_type == NIX_FLOW_KEY_TYPE_AH)\n\t\t\t\tkey_off = l4_key_offset;\n\t\t\tfield->key_offset = key_off;\n\t\t\tmemcpy(&alg[nr_field], field, 
sizeof(*field));\n\t\t\tmax_key_off = max(max_key_off, field->bytesm1 + 1);\n\n\t\t\t/* Found a field marker, get the next field */\n\t\t\tif (field_marker)\n\t\t\t\tnr_field++;\n\t\t}\n\n\t\t/* Found a keyoff marker, update the new key_off */\n\t\tif (keyoff_marker) {\n\t\t\tkey_off += max_key_off;\n\t\t\tmax_key_off = 0;\n\t\t}\n\t}\n\t/* Processed all the flow key types */\n\tif (idx == max_bit_pos && key_off <= MAX_KEY_OFF)\n\t\treturn 0;\n\telse\n\t\treturn NIX_AF_ERR_RSS_NOSPC_FIELD;\n}", "static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)\n{\n\t/*\n\t * Disable PCI Bus Parking and PCI Master read caching on CX700\n\t * which causes unspecified timing errors with a VT6212L on the PCI": "static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)\n{\n\t/*\n\t * Disable PCI Bus Parking and PCI Master read caching on CX700\n\t * which causes unspecified timing errors with a VT6212L on the PCI\n\t * bus leading to USB2.0 packet loss.\n\t *\n\t * This quirk is only enabled if a second (on the external PCI bus)\n\t * VT6212L is found -- the CX700 core itself also contains a USB\n\t * host controller with the same PCI ID as the VT6212L.\n\t */\n\n\t/* Count VT6212L instances */\n\tstruct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,\n\t\tPCI_DEVICE_ID_VIA_8235_USB_2, NULL);\n\tuint8_t b;\n\n\t/*\n\t * p should contain the first (internal) VT6212L -- see if we have\n\t * an external one by searching again.\n\t */\n\tp = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);\n\tif (!p)\n\t\treturn;\n\tpci_dev_put(p);\n\n\tif (pci_read_config_byte(dev, 0x76, &b) == 0) {\n\t\tif (b & 0x40) {\n\t\t\t/* Turn off PCI Bus Parking */\n\t\t\tpci_write_config_byte(dev, 0x76, b ^ 0x40);\n\n\t\t\tpci_info(dev, \"Disabling VIA CX700 PCI parking\\n\");\n\t\t}\n\t}\n\n\tif (pci_read_config_byte(dev, 0x72, &b) == 0) {\n\t\tif (b != 0) {\n\t\t\t/* Turn off PCI Master read caching */\n\t\t\tpci_write_config_byte(dev, 0x72, 0x0);\n\n\t\t\t/* Set 
PCI Master Bus time-out to \"1x16 PCLK\" */\n\t\t\tpci_write_config_byte(dev, 0x75, 0x1);\n\n\t\t\t/* Disable \"Read FIFO Timer\" */\n\t\t\tpci_write_config_byte(dev, 0x77, 0x0);\n\n\t\t\tpci_info(dev, \"Disabling VIA CX700 PCI caching\\n\");\n\t\t}\n\t}\n}", "static void ae5_register_set(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\tunsigned int count = ARRAY_SIZE(ca0132_ae5_register_set_addresses);\n\tconst unsigned int *addr = ca0132_ae5_register_set_addresses;": "static void ae5_register_set(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\tunsigned int count = ARRAY_SIZE(ca0132_ae5_register_set_addresses);\n\tconst unsigned int *addr = ca0132_ae5_register_set_addresses;\n\tconst unsigned char *data = ca0132_ae5_register_set_data;\n\tunsigned int i, cur_addr;\n\tunsigned char tmp[3];\n\n\tif (ca0132_quirk(spec) == QUIRK_AE7)\n\t\tchipio_8051_write_pll_pmu(codec, 0x41, 0xc8);\n\n\tchipio_8051_write_direct(codec, 0x93, 0x10);\n\tchipio_8051_write_pll_pmu(codec, 0x44, 0xc2);\n\n\tif (ca0132_quirk(spec) == QUIRK_AE7) {\n\t\ttmp[0] = 0x03;\n\t\ttmp[1] = 0x03;\n\t\ttmp[2] = 0x07;\n\t} else {\n\t\ttmp[0] = 0x0f;\n\t\ttmp[1] = 0x0f;\n\t\ttmp[2] = 0x0f;\n\t}\n\n\tfor (i = cur_addr = 0; i < 3; i++, cur_addr++)\n\t\twriteb(tmp[i], spec->mem_base + addr[cur_addr]);\n\n\t/*\n\t * First writes are in single bytes, final are in 4 bytes. 
So, we use\n\t * writeb, then writel.\n\t */\n\tfor (i = 0; cur_addr < 12; i++, cur_addr++)\n\t\twriteb(data[i], spec->mem_base + addr[cur_addr]);\n\n\tfor (; cur_addr < count; i++, cur_addr++)\n\t\twritel(data[i], spec->mem_base + addr[cur_addr]);\n\n\twritel(0x00800001, spec->mem_base + 0x20c);\n\n\tif (ca0132_quirk(spec) == QUIRK_AE7) {\n\t\tca0113_mmio_command_set_type2(codec, 0x48, 0x07, 0x83);\n\t\tca0113_mmio_command_set(codec, 0x30, 0x2e, 0x3f);\n\t} else {\n\t\tca0113_mmio_command_set(codec, 0x30, 0x2d, 0x3f);\n\t}\n\n\tchipio_8051_write_direct(codec, 0x90, 0x00);\n\tchipio_8051_write_direct(codec, 0x90, 0x10);\n\n\tif (ca0132_quirk(spec) == QUIRK_AE5)\n\t\tca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);\n}", "static int get_empty_pcm_device(struct hda_bus *bus, unsigned int type)\n{\n\t/* audio device indices; not linear to keep compatibility */\n\t/* assigned to static slots up to dev#10; if more needed, assign\n\t * the later slot dynamically (when CONFIG_SND_DYNAMIC_MINORS=y)": "static int get_empty_pcm_device(struct hda_bus *bus, unsigned int type)\n{\n\t/* audio device indices; not linear to keep compatibility */\n\t/* assigned to static slots up to dev#10; if more needed, assign\n\t * the later slot dynamically (when CONFIG_SND_DYNAMIC_MINORS=y)\n\t */\n\tstatic const int audio_idx[HDA_PCM_NTYPES][5] = {\n\t\t[HDA_PCM_TYPE_AUDIO] = { 0, 2, 4, 5, -1 },\n\t\t[HDA_PCM_TYPE_SPDIF] = { 1, -1 },\n\t\t[HDA_PCM_TYPE_HDMI] = { 3, 7, 8, 9, -1 },\n\t\t[HDA_PCM_TYPE_MODEM] = { 6, -1 },\n\t};\n\tint i;\n\n\tif (type >= HDA_PCM_NTYPES) {\n\t\tdev_err(bus->card->dev, \"Invalid PCM type %d\\n\", type);\n\t\treturn -EINVAL;\n\t}\n\n\tfor (i = 0; audio_idx[type][i] >= 0; i++) {\n#ifndef CONFIG_SND_DYNAMIC_MINORS\n\t\tif (audio_idx[type][i] >= 8)\n\t\t\tbreak;\n#endif\n\t\tif (!test_and_set_bit(audio_idx[type][i], bus->pcm_dev_bits))\n\t\t\treturn audio_idx[type][i];\n\t}\n\n#ifdef CONFIG_SND_DYNAMIC_MINORS\n\t/* non-fixed slots starting from 10 */\n\tfor (i = 10; i 
< 32; i++) {\n\t\tif (!test_and_set_bit(i, bus->pcm_dev_bits))\n\t\t\treturn i;\n\t}\n#endif\n\n\tdev_warn(bus->card->dev, \"Too many %s devices\\n\",\n\t\tsnd_hda_pcm_type_name[type]);\n#ifndef CONFIG_SND_DYNAMIC_MINORS\n\tdev_warn(bus->card->dev,\n\t\t \"Consider building the kernel with CONFIG_SND_DYNAMIC_MINORS=y\\n\");\n#endif\n\treturn -EAGAIN;\n}", "static void replay_journal(struct dm_integrity_c *ic)\n{\n\tunsigned i, j;\n\tbool used_commit_ids[N_COMMIT_IDS];\n\tunsigned max_commit_id_sections[N_COMMIT_IDS];": "static void replay_journal(struct dm_integrity_c *ic)\n{\n\tunsigned i, j;\n\tbool used_commit_ids[N_COMMIT_IDS];\n\tunsigned max_commit_id_sections[N_COMMIT_IDS];\n\tunsigned write_start, write_sections;\n\tunsigned continue_section;\n\tbool journal_empty;\n\tunsigned char unused, last_used, want_commit_seq;\n\n\tif (ic->mode == 'R')\n\t\treturn;\n\n\tif (ic->journal_uptodate)\n\t\treturn;\n\n\tlast_used = 0;\n\twrite_start = 0;\n\n\tif (!ic->just_formatted) {\n\t\tDEBUG_print(\"reading journal\\n\");\n\t\trw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);\n\t\tif (ic->journal_io)\n\t\t\tDEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, \"read journal\");\n\t\tif (ic->journal_io) {\n\t\t\tstruct journal_completion crypt_comp;\n\t\t\tcrypt_comp.ic = ic;\n\t\t\tinit_completion(&crypt_comp.comp);\n\t\t\tcrypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);\n\t\t\tencrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);\n\t\t\twait_for_completion(&crypt_comp.comp);\n\t\t}\n\t\tDEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, \"decrypted journal\");\n\t}\n\n\tif (dm_integrity_failed(ic))\n\t\tgoto clear_journal;\n\n\tjournal_empty = true;\n\tmemset(used_commit_ids, 0, sizeof used_commit_ids);\n\tmemset(max_commit_id_sections, 0, sizeof max_commit_id_sections);\n\tfor (i = 0; i < ic->journal_sections; i++) {\n\t\tfor (j = 0; j < ic->journal_section_sectors; j++) {\n\t\t\tint k;\n\t\t\tstruct journal_sector *js = 
access_journal(ic, i, j);\n\t\t\tk = find_commit_seq(ic, i, j, js->commit_id);\n\t\t\tif (k < 0)\n\t\t\t\tgoto clear_journal;\n\t\t\tused_commit_ids[k] = true;\n\t\t\tmax_commit_id_sections[k] = i;\n\t\t}\n\t\tif (journal_empty) {\n\t\t\tfor (j = 0; j < ic->journal_section_entries; j++) {\n\t\t\t\tstruct journal_entry *je = access_journal_entry(ic, i, j);\n\t\t\t\tif (!journal_entry_is_unused(je)) {\n\t\t\t\t\tjournal_empty = false;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif (!used_commit_ids[N_COMMIT_IDS - 1]) {\n\t\tunused = N_COMMIT_IDS - 1;\n\t\twhile (unused && !used_commit_ids[unused - 1])\n\t\t\tunused--;\n\t} else {\n\t\tfor (unused = 0; unused < N_COMMIT_IDS; unused++)\n\t\t\tif (!used_commit_ids[unused])\n\t\t\t\tbreak;\n\t\tif (unused == N_COMMIT_IDS) {\n\t\t\tdm_integrity_io_error(ic, \"journal commit ids\", -EIO);\n\t\t\tgoto clear_journal;\n\t\t}\n\t}\n\tDEBUG_print(\"first unused commit seq %d [%d,%d,%d,%d]\\n\",\n\t\t unused, used_commit_ids[0], used_commit_ids[1],\n\t\t used_commit_ids[2], used_commit_ids[3]);\n\n\tlast_used = prev_commit_seq(unused);\n\twant_commit_seq = prev_commit_seq(last_used);\n\n\tif (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])\n\t\tjournal_empty = true;\n\n\twrite_start = max_commit_id_sections[last_used] + 1;\n\tif (unlikely(write_start >= ic->journal_sections))\n\t\twant_commit_seq = next_commit_seq(want_commit_seq);\n\twraparound_section(ic, &write_start);\n\n\ti = write_start;\n\tfor (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {\n\t\tfor (j = 0; j < ic->journal_section_sectors; j++) {\n\t\t\tstruct journal_sector *js = access_journal(ic, i, j);\n\n\t\t\tif (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {\n\t\t\t\t/*\n\t\t\t\t * This could be caused by crash during writing.\n\t\t\t\t * We won't replay the inconsistent part of the\n\t\t\t\t * journal.\n\t\t\t\t */\n\t\t\t\tDEBUG_print(\"commit id mismatch at 
position (%u, %u): %d != %d\\n\",\n\t\t\t\t\t i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);\n\t\t\t\tgoto brk;\n\t\t\t}\n\t\t}\n\t\ti++;\n\t\tif (unlikely(i >= ic->journal_sections))\n\t\t\twant_commit_seq = next_commit_seq(want_commit_seq);\n\t\twraparound_section(ic, &i);\n\t}\nbrk:\n\n\tif (!journal_empty) {\n\t\tDEBUG_print(\"replaying %u sections, starting at %u, commit seq %d\\n\",\n\t\t\t write_sections, write_start, want_commit_seq);\n\t\tdo_journal_write(ic, write_start, write_sections, true);\n\t}\n\n\tif (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {\n\t\tcontinue_section = write_start;\n\t\tic->commit_seq = want_commit_seq;\n\t\tDEBUG_print(\"continuing from section %u, commit seq %d\\n\", write_start, ic->commit_seq);\n\t} else {\n\t\tunsigned s;\n\t\tunsigned char erase_seq;\nclear_journal:\n\t\tDEBUG_print(\"clearing journal\\n\");\n\n\t\terase_seq = prev_commit_seq(prev_commit_seq(last_used));\n\t\ts = write_start;\n\t\tinit_journal(ic, s, 1, erase_seq);\n\t\ts++;\n\t\twraparound_section(ic, &s);\n\t\tif (ic->journal_sections >= 2) {\n\t\t\tinit_journal(ic, s, ic->journal_sections - 2, erase_seq);\n\t\t\ts += ic->journal_sections - 2;\n\t\t\twraparound_section(ic, &s);\n\t\t\tinit_journal(ic, s, 1, erase_seq);\n\t\t}\n\n\t\tcontinue_section = 0;\n\t\tic->commit_seq = next_commit_seq(erase_seq);\n\t}\n\n\tic->committed_section = continue_section;\n\tic->n_committed_sections = 0;\n\n\tic->uncommitted_section = continue_section;\n\tic->n_uncommitted_sections = 0;\n\n\tic->free_section = continue_section;\n\tic->free_section_entry = 0;\n\tic->free_sectors = ic->journal_entries;\n\n\tic->journal_tree_root = RB_ROOT;\n\tfor (i = 0; i < ic->journal_entries; i++)\n\t\tinit_journal_node(&ic->journal_tree[i]);\n}", "static void flyvideo_gpio(struct bttv *btv)\n{\n\tint gpio, has_remote, has_radio, is_capture_only;\n\tint is_lr90, has_tda9820_tda9821;\n\tint tuner_type = UNSET, ttype;": "static void 
flyvideo_gpio(struct bttv *btv)\n{\n\tint gpio, has_remote, has_radio, is_capture_only;\n\tint is_lr90, has_tda9820_tda9821;\n\tint tuner_type = UNSET, ttype;\n\n\tgpio_inout(0xffffff, 0);\n\tudelay(8); /* without this we would see the 0x1800 mask */\n\tgpio = gpio_read();\n\t/* FIXME: must restore OUR_EN ??? */\n\n\t/* all cards provide GPIO info, some have an additional eeprom\n\t * LR50: GPIO coding can be found lower right CP1 .. CP9\n\t * CP9=GPIO23 .. CP1=GPIO15; when OPEN, the corresponding GPIO reads 1.\n\t * GPIO14-12: n.c.\n\t * LR90: GP9=GPIO23 .. GP1=GPIO15 (right above the bt878)\n\n\t * lowest 3 bytes are remote control codes (no handshake needed)\n\t * xxxFFF: No remote control chip soldered\n\t * xxxF00(LR26/LR50), xxxFE0(LR90): Remote control chip (LVA001 or CF45) soldered\n\t * Note: Some bits are Audio_Mask !\n\t */\n\tttype = (gpio & 0x0f0000) >> 16;\n\tswitch (ttype) {\n\tcase 0x0:\n\t\ttuner_type = 2; /* NTSC, e.g. TPI8NSR11P */\n\t\tbreak;\n\tcase 0x2:\n\t\ttuner_type = 39; /* LG NTSC (newer TAPC series) TAPC-H701P */\n\t\tbreak;\n\tcase 0x4:\n\t\ttuner_type = 5; /* Philips PAL TPI8PSB02P, TPI8PSB12P, TPI8PSB12D or FI1216, FM1216 */\n\t\tbreak;\n\tcase 0x6:\n\t\ttuner_type = 37; /* LG PAL (newer TAPC series) TAPC-G702P */\n\t\tbreak;\n\tcase 0xC:\n\t\ttuner_type = 3; /* Philips SECAM(+PAL) FQ1216ME or FI1216MF */\n\t\tbreak;\n\tdefault:\n\t\tpr_info(\"%d: FlyVideo_gpio: unknown tuner type\\n\", btv->c.nr);\n\t\tbreak;\n\t}\n\n\thas_remote = gpio & 0x800000;\n\thas_radio\t = gpio & 0x400000;\n\t/* unknown 0x200000;\n\t * unknown2 0x100000; */\n\tis_capture_only = !(gpio & 0x008000); /* GPIO15 */\n\thas_tda9820_tda9821 = !(gpio & 0x004000);\n\tis_lr90 = !(gpio & 0x002000); /* else LR26/LR50 (LR38/LR51 f. 
capture only) */\n\t/*\n\t * gpio & 0x001000 output bit for audio routing */\n\n\tif (is_capture_only)\n\t\ttuner_type = TUNER_ABSENT; /* No tuner present */\n\n\tpr_info(\"%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\\n\",\n\t\tbtv->c.nr, has_radio ? \"yes\" : \"no\",\n\t\thas_remote ? \"yes\" : \"no\", tuner_type, gpio);\n\tpr_info(\"%d: FlyVideo LR90=%s tda9821/tda9820=%s capture_only=%s\\n\",\n\t\tbtv->c.nr, is_lr90 ? \"yes\" : \"no\",\n\t\thas_tda9820_tda9821 ? \"yes\" : \"no\",\n\t\tis_capture_only ? \"yes\" : \"no\");\n\n\tif (tuner_type != UNSET) /* only set if known tuner autodetected, else let insmod option through */\n\t\tbtv->tuner_type = tuner_type;\n\tbtv->has_radio = has_radio;\n\n\t/* LR90 Audio Routing is done by 2 hef4052, so Audio_Mask has 4 bits: 0x001c80\n\t * LR26/LR50 only has 1 hef4052, Audio_Mask 0x000c00\n\t * Audio options: from tuner, from tda9821/tda9821(mono,stereo,sap), from tda9874, ext., mute */\n\tif (has_tda9820_tda9821)\n\t\tbtv->audio_mode_gpio = lt9415_audio;\n\t/* todo: if(has_tda9874) btv->audio_mode_gpio = fv2000s_audio; */\n}", "static int mn88443x_t_set_freq(struct mn88443x_priv *chip)\n{\n\tstruct device *dev = &chip->client_s->dev;\n\tstruct regmap *r_t = chip->regmap_t;\n\ts64 adckt, nco, ad_t;": "static int mn88443x_t_set_freq(struct mn88443x_priv *chip)\n{\n\tstruct device *dev = &chip->client_s->dev;\n\tstruct regmap *r_t = chip->regmap_t;\n\ts64 adckt, nco, ad_t;\n\tu32 m, v;\n\n\t/* Clock buffer (but not supported) or XTAL */\n\tif (chip->clk_freq >= CLK_LOW && chip->clk_freq < CLK_DIRECT) {\n\t\tchip->use_clkbuf = true;\n\t\tregmap_write(r_t, CLKSET1_T, 0x07);\n\n\t\tadckt = 0;\n\t} else {\n\t\tchip->use_clkbuf = false;\n\t\tregmap_write(r_t, CLKSET1_T, 0x00);\n\n\t\tadckt = chip->clk_freq;\n\t}\n\tif (!mn88443x_t_is_valid_clk(adckt, chip->if_freq)) {\n\t\tdev_err(dev, \"Invalid clock, CLK:%d, ADCKT:%lld, IF:%d\\n\",\n\t\t\tchip->clk_freq, adckt, chip->if_freq);\n\t\treturn -EINVAL;\n\t}\n\n\t/* 
Direct IF or Low IF */\n\tif (chip->if_freq == DIRECT_IF_57MHZ ||\n\t chip->if_freq == DIRECT_IF_44MHZ)\n\t\tnco = adckt * 2 - chip->if_freq;\n\telse\n\t\tnco = -((s64)chip->if_freq);\n\tnco = div_s64(nco << 24, adckt);\n\tad_t = div_s64(adckt << 22, S_T_FREQ);\n\n\tregmap_write(r_t, NCOFREQU_T, nco >> 16);\n\tregmap_write(r_t, NCOFREQM_T, nco >> 8);\n\tregmap_write(r_t, NCOFREQL_T, nco);\n\tregmap_write(r_t, FADU_T, ad_t >> 16);\n\tregmap_write(r_t, FADM_T, ad_t >> 8);\n\tregmap_write(r_t, FADL_T, ad_t);\n\n\t/* Level of IF */\n\tm = ADCSET1_T_REFSEL_MASK;\n\tv = ADCSET1_T_REFSEL_1_5V;\n\tregmap_update_bits(r_t, ADCSET1_T, m, v);\n\n\t/* Polarity of AGC */\n\tv = AGCSET2_T_IFPOLINV_INC | AGCSET2_T_RFPOLINV_INC;\n\tregmap_update_bits(r_t, AGCSET2_T, v, v);\n\n\t/* Lower output level of AGC */\n\tregmap_write(r_t, AGCV3_T, 0x00);\n\n\tregmap_write(r_t, MDSET_T, 0xfa);\n\n\treturn 0;\n}", "static int tg3_rx(struct tg3_napi *tnapi, int budget)\n{\n\tstruct tg3 *tp = tnapi->tp;\n\tu32 work_mask, rx_std_posted = 0;\n\tu32 std_prod_idx, jmb_prod_idx;": "static int tg3_rx(struct tg3_napi *tnapi, int budget)\n{\n\tstruct tg3 *tp = tnapi->tp;\n\tu32 work_mask, rx_std_posted = 0;\n\tu32 std_prod_idx, jmb_prod_idx;\n\tu32 sw_idx = tnapi->rx_rcb_ptr;\n\tu16 hw_idx;\n\tint received;\n\tstruct tg3_rx_prodring_set *tpr = &tnapi->prodring;\n\n\thw_idx = *(tnapi->rx_rcb_prod_idx);\n\t/*\n\t * We need to order the read of hw_idx and the read of\n\t * the opaque cookie.\n\t */\n\trmb();\n\twork_mask = 0;\n\treceived = 0;\n\tstd_prod_idx = tpr->rx_std_prod_idx;\n\tjmb_prod_idx = tpr->rx_jmb_prod_idx;\n\twhile (sw_idx != hw_idx && budget > 0) {\n\t\tstruct ring_info *ri;\n\t\tstruct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];\n\t\tunsigned int len;\n\t\tstruct sk_buff *skb;\n\t\tdma_addr_t dma_addr;\n\t\tu32 opaque_key, desc_idx, *post_ptr;\n\t\tu8 *data;\n\t\tu64 tstamp = 0;\n\n\t\tdesc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;\n\t\topaque_key = desc->opaque & 
RXD_OPAQUE_RING_MASK;\n\t\tif (opaque_key == RXD_OPAQUE_RING_STD) {\n\t\t\tri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];\n\t\t\tdma_addr = dma_unmap_addr(ri, mapping);\n\t\t\tdata = ri->data;\n\t\t\tpost_ptr = &std_prod_idx;\n\t\t\trx_std_posted++;\n\t\t} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {\n\t\t\tri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];\n\t\t\tdma_addr = dma_unmap_addr(ri, mapping);\n\t\t\tdata = ri->data;\n\t\t\tpost_ptr = &jmb_prod_idx;\n\t\t} else\n\t\t\tgoto next_pkt_nopost;\n\n\t\twork_mask |= opaque_key;\n\n\t\tif (desc->err_vlan & RXD_ERR_MASK) {\n\t\tdrop_it:\n\t\t\ttg3_recycle_rx(tnapi, tpr, opaque_key,\n\t\t\t\t desc_idx, *post_ptr);\n\t\tdrop_it_no_recycle:\n\t\t\t/* Other statistics kept track of by card. */\n\t\t\ttp->rx_dropped++;\n\t\t\tgoto next_pkt;\n\t\t}\n\n\t\tprefetch(data + TG3_RX_OFFSET(tp));\n\t\tlen = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -\n\t\t ETH_FCS_LEN;\n\n\t\tif ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==\n\t\t RXD_FLAG_PTPSTAT_PTPV1 ||\n\t\t (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==\n\t\t RXD_FLAG_PTPSTAT_PTPV2) {\n\t\t\ttstamp = tr32(TG3_RX_TSTAMP_LSB);\n\t\t\ttstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;\n\t\t}\n\n\t\tif (len > TG3_RX_COPY_THRESH(tp)) {\n\t\t\tint skb_size;\n\t\t\tunsigned int frag_size;\n\n\t\t\tskb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,\n\t\t\t\t\t\t *post_ptr, &frag_size);\n\t\t\tif (skb_size < 0)\n\t\t\t\tgoto drop_it;\n\n\t\t\tdma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,\n\t\t\t\t\t DMA_FROM_DEVICE);\n\n\t\t\t/* Ensure that the update to the data happens\n\t\t\t * after the usage of the old DMA mapping.\n\t\t\t */\n\t\t\tsmp_wmb();\n\n\t\t\tri->data = NULL;\n\n\t\t\tskb = build_skb(data, frag_size);\n\t\t\tif (!skb) {\n\t\t\t\ttg3_frag_free(frag_size != 0, data);\n\t\t\t\tgoto drop_it_no_recycle;\n\t\t\t}\n\t\t\tskb_reserve(skb, TG3_RX_OFFSET(tp));\n\t\t} else {\n\t\t\ttg3_recycle_rx(tnapi, tpr, opaque_key,\n\t\t\t\t desc_idx, 
*post_ptr);\n\n\t\t\tskb = netdev_alloc_skb(tp->dev,\n\t\t\t\t\t len + TG3_RAW_IP_ALIGN);\n\t\t\tif (skb == NULL)\n\t\t\t\tgoto drop_it_no_recycle;\n\n\t\t\tskb_reserve(skb, TG3_RAW_IP_ALIGN);\n\t\t\tdma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,\n\t\t\t\t\t\tDMA_FROM_DEVICE);\n\t\t\tmemcpy(skb->data,\n\t\t\t data + TG3_RX_OFFSET(tp),\n\t\t\t len);\n\t\t\tdma_sync_single_for_device(&tp->pdev->dev, dma_addr,\n\t\t\t\t\t\t len, DMA_FROM_DEVICE);\n\t\t}\n\n\t\tskb_put(skb, len);\n\t\tif (tstamp)\n\t\t\ttg3_hwclock_to_timestamp(tp, tstamp,\n\t\t\t\t\t\t skb_hwtstamps(skb));\n\n\t\tif ((tp->dev->features & NETIF_F_RXCSUM) &&\n\t\t (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&\n\t\t (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)\n\t\t >> RXD_TCPCSUM_SHIFT) == 0xffff))\n\t\t\tskb->ip_summed = CHECKSUM_UNNECESSARY;\n\t\telse\n\t\t\tskb_checksum_none_assert(skb);\n\n\t\tskb->protocol = eth_type_trans(skb, tp->dev);\n\n\t\tif (len > (tp->dev->mtu + ETH_HLEN) &&\n\t\t skb->protocol != htons(ETH_P_8021Q) &&\n\t\t skb->protocol != htons(ETH_P_8021AD)) {\n\t\t\tdev_kfree_skb_any(skb);\n\t\t\tgoto drop_it_no_recycle;\n\t\t}\n\n\t\tif (desc->type_flags & RXD_FLAG_VLAN &&\n\t\t !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))\n\t\t\t__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),\n\t\t\t\t\t desc->err_vlan & RXD_VLAN_MASK);\n\n\t\tnapi_gro_receive(&tnapi->napi, skb);\n\n\t\treceived++;\n\t\tbudget--;\n\nnext_pkt:\n\t\t(*post_ptr)++;\n\n\t\tif (unlikely(rx_std_posted >= tp->rx_std_max_post)) {\n\t\t\ttpr->rx_std_prod_idx = std_prod_idx &\n\t\t\t\t\t tp->rx_std_ring_mask;\n\t\t\ttw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,\n\t\t\t\t tpr->rx_std_prod_idx);\n\t\t\twork_mask &= ~RXD_OPAQUE_RING_STD;\n\t\t\trx_std_posted = 0;\n\t\t}\nnext_pkt_nopost:\n\t\tsw_idx++;\n\t\tsw_idx &= tp->rx_ret_ring_mask;\n\n\t\t/* Refresh hw_idx to see if there is new work */\n\t\tif (sw_idx == hw_idx) {\n\t\t\thw_idx = *(tnapi->rx_rcb_prod_idx);\n\t\t\trmb();\n\t\t}\n\t}\n\n\t/* ACK the status ring. 
*/\n\ttnapi->rx_rcb_ptr = sw_idx;\n\ttw32_rx_mbox(tnapi->consmbox, sw_idx);\n\n\t/* Refill RX ring(s). */\n\tif (!tg3_flag(tp, ENABLE_RSS)) {\n\t\t/* Sync BD data before updating mailbox */\n\t\twmb();\n\n\t\tif (work_mask & RXD_OPAQUE_RING_STD) {\n\t\t\ttpr->rx_std_prod_idx = std_prod_idx &\n\t\t\t\t\t tp->rx_std_ring_mask;\n\t\t\ttw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,\n\t\t\t\t tpr->rx_std_prod_idx);\n\t\t}\n\t\tif (work_mask & RXD_OPAQUE_RING_JUMBO) {\n\t\t\ttpr->rx_jmb_prod_idx = jmb_prod_idx &\n\t\t\t\t\t tp->rx_jmb_ring_mask;\n\t\t\ttw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,\n\t\t\t\t tpr->rx_jmb_prod_idx);\n\t\t}\n\t} else if (work_mask) {\n\t\t/* rx_std_buffers[] and rx_jmb_buffers[] entries must be\n\t\t * updated before the producer indices can be updated.\n\t\t */\n\t\tsmp_wmb();\n\n\t\ttpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;\n\t\ttpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;\n\n\t\tif (tnapi != &tp->napi[1]) {\n\t\t\ttp->rx_refill = true;\n\t\t\tnapi_schedule(&tp->napi[1].napi);\n\t\t}\n\t}\n\n\treturn received;\n}", "static int test_syswide_multi_diff_addr_ro_wo(void)\n{\n\tunsigned long long breaks1 = 0, breaks2 = 0;\n\tint *fd1 = malloc(nprocs * sizeof(int));\n\tint *fd2 = malloc(nprocs * sizeof(int));": "static int test_syswide_multi_diff_addr_ro_wo(void)\n{\n\tunsigned long long breaks1 = 0, breaks2 = 0;\n\tint *fd1 = malloc(nprocs * sizeof(int));\n\tint *fd2 = malloc(nprocs * sizeof(int));\n\tchar *desc = \"Systemwide, Two events, diff addr, one is RO, other is WO\";\n\tint ret;\n\n\tret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));\n\tif (ret) {\n\t\tperror(\"perf_systemwide_event_open\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&b, (__u64)sizeof(b));\n\tif (ret) {\n\t\tclose_fds(fd1, nprocs);\n\t\tperror(\"perf_systemwide_event_open\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\treset_fds(fd1, nprocs);\n\treset_fds(fd2, 
nprocs);\n\tenable_fds(fd1, nprocs);\n\tenable_fds(fd2, nprocs);\n\tmulti_dawr_workload();\n\tdisable_fds(fd1, nprocs);\n\tdisable_fds(fd2, nprocs);\n\n\tbreaks1 = read_fds(fd1, nprocs);\n\tbreaks2 = read_fds(fd2, nprocs);\n\n\tclose_fds(fd1, nprocs);\n\tclose_fds(fd2, nprocs);\n\n\tfree(fd1);\n\tfree(fd2);\n\n\tif (breaks1 != 1 || breaks2 != 1) {\n\t\tprintf(\"FAILED: %s: %lld != 1 || %lld != 1\\n\", desc, breaks1, breaks2);\n\t\treturn 1;\n\t}\n\n\tprintf(\"TESTED: %s\\n\", desc);\n\treturn 0;\n}", "static void localtime_3(struct xtm *r, time64_t time)\n{\n\tunsigned int year, i, w = r->dse;\n\n\t/*": "static void localtime_3(struct xtm *r, time64_t time)\n{\n\tunsigned int year, i, w = r->dse;\n\n\t/*\n\t * In each year, a certain number of days-since-the-epoch have passed.\n\t * Find the year that is closest to said days.\n\t *\n\t * Consider, for example, w=21612 (2029-03-04). Loop will abort on\n\t * dse[i] <= w, which happens when dse[i] == 21550. This implies\n\t * year == 2009. w will then be 62.\n\t */\n\tfor (i = 0, year = DSE_FIRST; days_since_epoch[i] > w;\n\t ++i, --year)\n\t\t/* just loop */;\n\n\tw -= days_since_epoch[i];\n\n\t/*\n\t * By now we have the current year, and the day of the year.\n\t * r->yearday = w;\n\t *\n\t * On to finding the month (like above). In each month, a certain\n\t * number of days-since-New Year have passed, and find the closest\n\t * one.\n\t *\n\t * Consider w=62 (in a non-leap year). Loop will abort on\n\t * dsy[i] < w, which happens when dsy[i] == 31+28 (i == 2).\n\t * Concludes i == 2, i.e. 
3rd month => March.\n\t *\n\t * (A different approach to use would be to subtract a monthlength\n\t * from w repeatedly while counting.)\n\t */\n\tif (is_leap(year)) {\n\t\t/* use days_since_leapyear[] in a leap year */\n\t\tfor (i = ARRAY_SIZE(days_since_leapyear) - 1;\n\t\t i > 0 && days_since_leapyear[i] > w; --i)\n\t\t\t/* just loop */;\n\t\tr->monthday = w - days_since_leapyear[i] + 1;\n\t} else {\n\t\tfor (i = ARRAY_SIZE(days_since_year) - 1;\n\t\t i > 0 && days_since_year[i] > w; --i)\n\t\t\t/* just loop */;\n\t\tr->monthday = w - days_since_year[i] + 1;\n\t}\n\n\tr->month = i + 1;\n}", "static void ae5_post_dsp_startup_data(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\n\tmutex_lock(&spec->chipio_mutex);": "static void ae5_post_dsp_startup_data(struct hda_codec *codec)\n{\n\tstruct ca0132_spec *spec = codec->spec;\n\n\tmutex_lock(&spec->chipio_mutex);\n\n\tchipio_write_no_mutex(codec, 0x189000, 0x0001f101);\n\tchipio_write_no_mutex(codec, 0x189004, 0x0001f101);\n\tchipio_write_no_mutex(codec, 0x189024, 0x00014004);\n\tchipio_write_no_mutex(codec, 0x189028, 0x0002000f);\n\n\tca0113_mmio_command_set(codec, 0x48, 0x0a, 0x05);\n\tchipio_set_control_param_no_mutex(codec, CONTROL_PARAM_ASI, 7);\n\tca0113_mmio_command_set(codec, 0x48, 0x0b, 0x12);\n\tca0113_mmio_command_set(codec, 0x48, 0x04, 0x00);\n\tca0113_mmio_command_set(codec, 0x48, 0x06, 0x48);\n\tca0113_mmio_command_set(codec, 0x48, 0x0a, 0x05);\n\tca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);\n\tca0113_mmio_command_set(codec, 0x48, 0x0f, 0x00);\n\tca0113_mmio_command_set(codec, 0x48, 0x10, 0x00);\n\tca0113_mmio_gpio_set(codec, 0, true);\n\tca0113_mmio_gpio_set(codec, 1, true);\n\tca0113_mmio_command_set(codec, 0x48, 0x07, 0x80);\n\n\tchipio_write_no_mutex(codec, 0x18b03c, 0x00000012);\n\n\tca0113_mmio_command_set(codec, 0x48, 0x0f, 0x00);\n\tca0113_mmio_command_set(codec, 0x48, 0x10, 0x00);\n\n\tmutex_unlock(&spec->chipio_mutex);\n}", "static void reset_fim(struct 
imx_media_fim *fim, bool curval)\n{\n\tstruct v4l2_ctrl *icap_chan = fim->icap_ctrl[FIM_CL_ICAP_CHANNEL];\n\tstruct v4l2_ctrl *icap_edge = fim->icap_ctrl[FIM_CL_ICAP_EDGE];\n\tstruct v4l2_ctrl *en = fim->ctrl[FIM_CL_ENABLE];": "static void reset_fim(struct imx_media_fim *fim, bool curval)\n{\n\tstruct v4l2_ctrl *icap_chan = fim->icap_ctrl[FIM_CL_ICAP_CHANNEL];\n\tstruct v4l2_ctrl *icap_edge = fim->icap_ctrl[FIM_CL_ICAP_EDGE];\n\tstruct v4l2_ctrl *en = fim->ctrl[FIM_CL_ENABLE];\n\tstruct v4l2_ctrl *num = fim->ctrl[FIM_CL_NUM];\n\tstruct v4l2_ctrl *skip = fim->ctrl[FIM_CL_NUM_SKIP];\n\tstruct v4l2_ctrl *tol_min = fim->ctrl[FIM_CL_TOLERANCE_MIN];\n\tstruct v4l2_ctrl *tol_max = fim->ctrl[FIM_CL_TOLERANCE_MAX];\n\n\tif (curval) {\n\t\tfim->enabled = en->cur.val;\n\t\tfim->icap_flags = icap_edge->cur.val;\n\t\tfim->icap_channel = icap_chan->cur.val;\n\t\tfim->num_avg = num->cur.val;\n\t\tfim->num_skip = skip->cur.val;\n\t\tfim->tolerance_min = tol_min->cur.val;\n\t\tfim->tolerance_max = tol_max->cur.val;\n\t} else {\n\t\tfim->enabled = en->val;\n\t\tfim->icap_flags = icap_edge->val;\n\t\tfim->icap_channel = icap_chan->val;\n\t\tfim->num_avg = num->val;\n\t\tfim->num_skip = skip->val;\n\t\tfim->tolerance_min = tol_min->val;\n\t\tfim->tolerance_max = tol_max->val;\n\t}\n\n\t/* disable tolerance range if max <= min */\n\tif (fim->tolerance_max <= fim->tolerance_min)\n\t\tfim->tolerance_max = 0;\n\n\t/* num_skip must be >= 1 if input capture not used */\n\tif (!icap_enabled(fim))\n\t\tfim->num_skip = max_t(int, fim->num_skip, 1);\n\n\tfim->counter = -fim->num_skip;\n\tfim->sum = 0;\n}", "static int imgu_css_binary_setup(struct imgu_css *css, unsigned int pipe)\n{\n\tstruct imgu_css_pipe *css_pipe = &css->pipes[pipe];\n\tstruct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];\n\tstruct imgu_device *imgu = dev_get_drvdata(css->dev);": "static int imgu_css_binary_setup(struct imgu_css *css, unsigned int pipe)\n{\n\tstruct imgu_css_pipe *css_pipe = 
&css->pipes[pipe];\n\tstruct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];\n\tstruct imgu_device *imgu = dev_get_drvdata(css->dev);\n\tint i, j, size;\n\tstatic const int BYPC = 2;\t/* Bytes per component */\n\tunsigned int w, h;\n\n\t/* Allocate parameter memory blocks for this binary */\n\n\tfor (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)\n\t\tfor (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {\n\t\t\tif (imgu_css_dma_buffer_resize(\n\t\t\t imgu,\n\t\t\t &css_pipe->binary_params_cs[j - 1][i],\n\t\t\t bi->info.isp.sp.mem_initializers.params[j][i].size))\n\t\t\t\tgoto out_of_memory;\n\t\t}\n\n\t/* Allocate internal frame buffers */\n\n\t/* Reference frames for DVS, FRAME_FORMAT_YUV420_16 */\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel = BYPC;\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width =\n\t\t\t\t\tcss_pipe->rect[IPU3_CSS_RECT_BDS].width;\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height =\n\t\t\t\tALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].height,\n\t\t\t\t IMGU_DVS_BLOCK_H) + 2 * IMGU_GDC_BUF_Y;\n\th = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;\n\tw = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,\n\t\t 2 * IPU3_UAPI_ISP_VEC_ELEMS) + 2 * IMGU_GDC_BUF_X;\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline =\n\t\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel * w;\n\tsize = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2;\n\tfor (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)\n\t\tif (imgu_css_dma_buffer_resize(\n\t\t\timgu,\n\t\t\t&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],\n\t\t\tsize))\n\t\t\tgoto out_of_memory;\n\n\t/* TNR frames for temporal noise reduction, FRAME_FORMAT_YUV_LINE */\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperpixel = 1;\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width =\n\t\t\troundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width,\n\t\t\t\tbi->info.isp.sp.block.block_width 
*\n\t\t\t\tIPU3_UAPI_ISP_VEC_ELEMS);\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height =\n\t\t\troundup(css_pipe->rect[IPU3_CSS_RECT_GDC].height,\n\t\t\t\tbi->info.isp.sp.block.output_block_height);\n\n\tw = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;\n\tcss_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline = w;\n\th = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;\n\tsize = w * ALIGN(h * 3 / 2 + 3, 2);\t/* +3 for vf_pp prefetch */\n\tfor (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)\n\t\tif (imgu_css_dma_buffer_resize(\n\t\t\timgu,\n\t\t\t&css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],\n\t\t\tsize))\n\t\t\tgoto out_of_memory;\n\n\treturn 0;\n\nout_of_memory:\n\timgu_css_binary_cleanup(css, pipe);\n\treturn -ENOMEM;\n}", "static void lpss_iosf_enter_d3_state(void)\n{\n\tu32 value1 = 0;\n\tu32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;\n\tu32 value2 = LPSS_PMCSR_D3hot;": "static void lpss_iosf_enter_d3_state(void)\n{\n\tu32 value1 = 0;\n\tu32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;\n\tu32 value2 = LPSS_PMCSR_D3hot;\n\tu32 mask2 = LPSS_PMCSR_Dx_MASK;\n\t/*\n\t * PMC provides an information about actual status of the LPSS devices.\n\t * Here we read the values related to LPSS power island, i.e. 
LPSS\n\t * devices, excluding both LPSS DMA controllers, along with SCC domain.\n\t */\n\tu32 func_dis, d3_sts_0, pmc_status;\n\tint ret;\n\n\tret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);\n\tif (ret)\n\t\treturn;\n\n\tmutex_lock(&lpss_iosf_mutex);\n\n\tret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);\n\tif (ret)\n\t\tgoto exit;\n\n\t/*\n\t * Get the status of entire LPSS power island per device basis.\n\t * Shutdown both LPSS DMA controllers if and only if all other devices\n\t * are already in D3hot.\n\t */\n\tpmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;\n\tif (pmc_status)\n\t\tgoto exit;\n\n\tiosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,\n\t\t\tLPSS_IOSF_PMCSR, value2, mask2);\n\n\tiosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,\n\t\t\tLPSS_IOSF_PMCSR, value2, mask2);\n\n\tiosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,\n\t\t\tLPSS_IOSF_GPIODEF0, value1, mask1);\n\n\tlpss_iosf_d3_entered = true;\n\nexit:\n\tmutex_unlock(&lpss_iosf_mutex);\n}", "static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])\n{\n\tstruct smc_llc_msg_test_link *testllc;\n\tstruct smc_wr_tx_pend_priv *pend;\n\tstruct smc_wr_buf *wr_buf;": "static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])\n{\n\tstruct smc_llc_msg_test_link *testllc;\n\tstruct smc_wr_tx_pend_priv *pend;\n\tstruct smc_wr_buf *wr_buf;\n\tint rc;\n\n\tif (!smc_wr_tx_link_hold(link))\n\t\treturn -ENOLINK;\n\trc = smc_llc_add_pending_send(link, &wr_buf, &pend);\n\tif (rc)\n\t\tgoto put_out;\n\ttestllc = (struct smc_llc_msg_test_link *)wr_buf;\n\tmemset(testllc, 0, sizeof(*testllc));\n\ttestllc->hd.common.llc_type = SMC_LLC_TEST_LINK;\n\tsmc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc));\n\tmemcpy(testllc->user_data, user_data, sizeof(testllc->user_data));\n\t/* send llc message */\n\trc = smc_wr_tx_send(link, pend);\nput_out:\n\tsmc_wr_tx_link_put(link);\n\treturn rc;\n}", "static void tg3_dump_legacy_regs(struct tg3 *tp, u32 
*regs)\n{\n\ttg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);\n\ttg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);\n\ttg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);": "static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)\n{\n\ttg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);\n\ttg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);\n\ttg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);\n\ttg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);\n\ttg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);\n\ttg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);\n\ttg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);\n\ttg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);\n\ttg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);\n\ttg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);\n\ttg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);\n\ttg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);\n\ttg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);\n\ttg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);\n\ttg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);\n\ttg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);\n\ttg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);\n\ttg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);\n\ttg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);\n\n\tif (tg3_flag(tp, SUPPORT_MSIX))\n\t\ttg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);\n\n\ttg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);\n\ttg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);\n\ttg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);\n\ttg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);\n\ttg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);\n\ttg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);\n\ttg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);\n\ttg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);\n\n\tif (!tg3_flag(tp, 5705_PLUS)) {\n\t\ttg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);\n\t\ttg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);\n\t\ttg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);\n\t}\n\n\ttg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);\n\ttg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);\n\ttg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);\n\ttg3_rd32_loop(tp, 
regs, DMAC_MODE, 0x04);\n\ttg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);\n\n\tif (tg3_flag(tp, NVRAM))\n\t\ttg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);\n}", "static int __init test_h6(struct crypto_shash *tfm_cmac)\n{\n\tconst u8 w[16] = {\n\t\t\t0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,\n\t\t\t0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };": "static int __init test_h6(struct crypto_shash *tfm_cmac)\n{\n\tconst u8 w[16] = {\n\t\t\t0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,\n\t\t\t0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec };\n\tconst u8 key_id[4] = { 0x72, 0x62, 0x65, 0x6c };\n\tconst u8 exp[16] = {\n\t\t\t0x99, 0x63, 0xb1, 0x80, 0xe2, 0xa9, 0xd3, 0xe8,\n\t\t\t0x1c, 0xc9, 0x6d, 0xe7, 0x02, 0xe1, 0x9a, 0x2d };\n\tu8 res[16];\n\tint err;\n\n\terr = smp_h6(tfm_cmac, w, key_id, res);\n\tif (err)\n\t\treturn err;\n\n\tif (crypto_memneq(res, exp, 16))\n\t\treturn -EINVAL;\n\n\treturn 0;\n}", "static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)\n{\n\tstruct cpt_ctx *ctx = filp->private;\n\tu64 busy_sts = 0, free_sts = 0;\n\tu32 e_min = 0, e_max = 0, e, i;": "static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)\n{\n\tstruct cpt_ctx *ctx = filp->private;\n\tu64 busy_sts = 0, free_sts = 0;\n\tu32 e_min = 0, e_max = 0, e, i;\n\tu16 max_ses, max_ies, max_aes;\n\tstruct rvu *rvu = ctx->rvu;\n\tint blkaddr = ctx->blkaddr;\n\tu64 reg;\n\n\treg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);\n\tmax_ses = reg & 0xffff;\n\tmax_ies = (reg >> 16) & 0xffff;\n\tmax_aes = (reg >> 32) & 0xffff;\n\n\tswitch (eng_type) {\n\tcase CPT_AE_TYPE:\n\t\te_min = max_ses + max_ies;\n\t\te_max = max_ses + max_ies + max_aes;\n\t\tbreak;\n\tcase CPT_SE_TYPE:\n\t\te_min = 0;\n\t\te_max = max_ses;\n\t\tbreak;\n\tcase CPT_IE_TYPE:\n\t\te_min = max_ses;\n\t\te_max = max_ses + max_ies;\n\t\tbreak;\n\tdefault:\n\t\treturn -EINVAL;\n\t}\n\n\tfor (e = e_min, i = 0; e < e_max; e++, i++) {\n\t\treg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));\n\t\tif (reg & 
0x1)\n\t\t\tbusy_sts |= 1ULL << i;\n\n\t\tif (reg & 0x2)\n\t\t\tfree_sts |= 1ULL << i;\n\t}\n\tseq_printf(filp, \"FREE STS : 0x%016llx\\n\", free_sts);\n\tseq_printf(filp, \"BUSY STS : 0x%016llx\\n\", busy_sts);\n\n\treturn 0;\n}", "static int test_syswide_multi_same_addr_ro_wo(void)\n{\n\tunsigned long long breaks1 = 0, breaks2 = 0;\n\tint *fd1 = malloc(nprocs * sizeof(int));\n\tint *fd2 = malloc(nprocs * sizeof(int));": "static int test_syswide_multi_same_addr_ro_wo(void)\n{\n\tunsigned long long breaks1 = 0, breaks2 = 0;\n\tint *fd1 = malloc(nprocs * sizeof(int));\n\tint *fd2 = malloc(nprocs * sizeof(int));\n\tchar *desc = \"Systemwide, Two events, same addr, one is RO, other is WO\";\n\tint ret;\n\n\tret = perf_systemwide_event_open(fd1, HW_BREAKPOINT_W, (__u64)&a, (__u64)sizeof(a));\n\tif (ret) {\n\t\tperror(\"perf_systemwide_event_open\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\tret = perf_systemwide_event_open(fd2, HW_BREAKPOINT_R, (__u64)&a, (__u64)sizeof(a));\n\tif (ret) {\n\t\tclose_fds(fd1, nprocs);\n\t\tperror(\"perf_systemwide_event_open\");\n\t\texit(EXIT_FAILURE);\n\t}\n\n\treset_fds(fd1, nprocs);\n\treset_fds(fd2, nprocs);\n\tenable_fds(fd1, nprocs);\n\tenable_fds(fd2, nprocs);\n\tmulti_dawr_workload();\n\tdisable_fds(fd1, nprocs);\n\tdisable_fds(fd2, nprocs);\n\n\tbreaks1 = read_fds(fd1, nprocs);\n\tbreaks2 = read_fds(fd2, nprocs);\n\n\tclose_fds(fd1, nprocs);\n\tclose_fds(fd2, nprocs);\n\n\tfree(fd1);\n\tfree(fd2);\n\n\tif (breaks1 != 1 || breaks2 != 1) {\n\t\tprintf(\"FAILED: %s: %lld != 1 || %lld != 1\\n\", desc, breaks1, breaks2);\n\t\treturn 1;\n\t}\n\n\tprintf(\"TESTED: %s\\n\", desc);\n\treturn 0;\n}", "static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)\n{\n\tint i = 0, hard_lp_ctr = 100000;\n\tu64 inprog, grp_ptr;\n\tu16 nq_ptr, dq_ptr;": "static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)\n{\n\tint i = 0, hard_lp_ctr = 100000;\n\tu64 inprog, grp_ptr;\n\tu16 nq_ptr, dq_ptr;\n\n\t/* Disable 
instructions enqueuing */\n\trvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);\n\n\t/* Disable executions in the LF's queue */\n\tinprog = rvu_read64(rvu, blkaddr,\n\t\t\t CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));\n\tinprog &= ~BIT_ULL(16);\n\trvu_write64(rvu, blkaddr,\n\t\t CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);\n\n\t/* Wait for CPT queue to become execution-quiescent */\n\tdo {\n\t\tinprog = rvu_read64(rvu, blkaddr,\n\t\t\t\t CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));\n\t\tif (INPROG_GRB_PARTIAL(inprog)) {\n\t\t\ti = 0;\n\t\t\thard_lp_ctr--;\n\t\t} else {\n\t\t\ti++;\n\t\t}\n\n\t\tgrp_ptr = rvu_read64(rvu, blkaddr,\n\t\t\t\t CPT_AF_BAR2_ALIASX(slot,\n\t\t\t\t\t\t\tCPT_LF_Q_GRP_PTR));\n\t\tnq_ptr = (grp_ptr >> 32) & 0x7FFF;\n\t\tdq_ptr = grp_ptr & 0x7FFF;\n\n\t} while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));\n\n\tif (hard_lp_ctr == 0)\n\t\tdev_warn(rvu->dev, \"CPT FLR hits hard loop counter\\n\");\n\n\ti = 0;\n\thard_lp_ctr = 100000;\n\tdo {\n\t\tinprog = rvu_read64(rvu, blkaddr,\n\t\t\t\t CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));\n\n\t\tif ((INPROG_INFLIGHT(inprog) == 0) &&\n\t\t (INPROG_GWB(inprog) < 40) &&\n\t\t ((INPROG_GRB(inprog) == 0) ||\n\t\t (INPROG_GRB((inprog)) == 40))) {\n\t\t\ti++;\n\t\t} else {\n\t\t\ti = 0;\n\t\t\thard_lp_ctr--;\n\t\t}\n\t} while (hard_lp_ctr && (i < 10));\n\n\tif (hard_lp_ctr == 0)\n\t\tdev_warn(rvu->dev, \"CPT FLR hits hard loop counter\\n\");\n}", "static int san_events_register(struct platform_device *pdev)\n{\n\tstruct san_data *d = platform_get_drvdata(pdev);\n\tint status;\n": "static int san_events_register(struct platform_device *pdev)\n{\n\tstruct san_data *d = platform_get_drvdata(pdev);\n\tint status;\n\n\td->nf_bat.base.priority = 1;\n\td->nf_bat.base.fn = san_evt_bat_nf;\n\td->nf_bat.event.reg = SSAM_EVENT_REGISTRY_SAM;\n\td->nf_bat.event.id.target_category = SSAM_SSH_TC_BAT;\n\td->nf_bat.event.id.instance = 0;\n\td->nf_bat.event.mask = 
SSAM_EVENT_MASK_TARGET;\n\td->nf_bat.event.flags = SSAM_EVENT_SEQUENCED;\n\n\td->nf_tmp.base.priority = 1;\n\td->nf_tmp.base.fn = san_evt_tmp_nf;\n\td->nf_tmp.event.reg = SSAM_EVENT_REGISTRY_SAM;\n\td->nf_tmp.event.id.target_category = SSAM_SSH_TC_TMP;\n\td->nf_tmp.event.id.instance = 0;\n\td->nf_tmp.event.mask = SSAM_EVENT_MASK_TARGET;\n\td->nf_tmp.event.flags = SSAM_EVENT_SEQUENCED;\n\n\tstatus = ssam_notifier_register(d->ctrl, &d->nf_bat);\n\tif (status)\n\t\treturn status;\n\n\tstatus = ssam_notifier_register(d->ctrl, &d->nf_tmp);\n\tif (status)\n\t\tssam_notifier_unregister(d->ctrl, &d->nf_bat);\n\n\treturn status;\n}", "static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)\n{\n\t/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */\n#define PUSH_CNT 51\n\t/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */": "static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)\n{\n\t/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */\n#define PUSH_CNT 51\n\t/* jump range is limited to 16 bit. 
PUSH_CNT of ld_abs needs room */\n\tunsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;\n\tstruct bpf_insn *insn = self->fill_insns;\n\tint i = 0, j, k = 0;\n\n\tinsn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);\nloop:\n\tfor (j = 0; j < PUSH_CNT; j++) {\n\t\tinsn[i++] = BPF_LD_ABS(BPF_B, 0);\n\t\t/* jump to error label */\n\t\tinsn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);\n\t\ti++;\n\t\tinsn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);\n\t\tinsn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);\n\t\tinsn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);\n\t\tinsn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,\n\t\t\t\t\t BPF_FUNC_skb_vlan_push),\n\t\tinsn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);\n\t\ti++;\n\t}\n\n\tfor (j = 0; j < PUSH_CNT; j++) {\n\t\tinsn[i++] = BPF_LD_ABS(BPF_B, 0);\n\t\tinsn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);\n\t\ti++;\n\t\tinsn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);\n\t\tinsn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,\n\t\t\t\t\t BPF_FUNC_skb_vlan_pop),\n\t\tinsn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);\n\t\ti++;\n\t}\n\tif (++k < 5)\n\t\tgoto loop;\n\n\tfor (; i < len - 3; i++)\n\t\tinsn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);\n\tinsn[len - 3] = BPF_JMP_A(1);\n\t/* error label */\n\tinsn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);\n\tinsn[len - 1] = BPF_EXIT_INSN();\n\tself->prog_len = len;\n}", "static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)\n{\n\tstruct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;\n\tstruct smc_link *link = conn->lnk;\n\tstruct smc_rdma_wr *wr_rdma_buf;": "static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)\n{\n\tstruct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;\n\tstruct smc_link *link = conn->lnk;\n\tstruct smc_rdma_wr *wr_rdma_buf;\n\tstruct smc_cdc_tx_pend *pend;\n\tstruct smc_wr_buf *wr_buf;\n\tint rc;\n\n\tif (!link || !smc_wr_tx_link_hold(link))\n\t\treturn -ENOLINK;\n\trc = 
smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);\n\tif (rc < 0) {\n\t\tsmc_wr_tx_link_put(link);\n\t\tif (rc == -EBUSY) {\n\t\t\tstruct smc_sock *smc =\n\t\t\t\tcontainer_of(conn, struct smc_sock, conn);\n\n\t\t\tif (smc->sk.sk_err == ECONNABORTED)\n\t\t\t\treturn sock_error(&smc->sk);\n\t\t\tif (conn->killed)\n\t\t\t\treturn -EPIPE;\n\t\t\trc = 0;\n\t\t\tmod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,\n\t\t\t\t\t SMC_TX_WORK_DELAY);\n\t\t}\n\t\treturn rc;\n\t}\n\n\tspin_lock_bh(&conn->send_lock);\n\tif (link != conn->lnk) {\n\t\t/* link of connection changed, tx_work will restart */\n\t\tsmc_wr_tx_put_slot(link,\n\t\t\t\t (struct smc_wr_tx_pend_priv *)pend);\n\t\trc = -ENOLINK;\n\t\tgoto out_unlock;\n\t}\n\tif (!pflags->urg_data_present) {\n\t\trc = smc_tx_rdma_writes(conn, wr_rdma_buf);\n\t\tif (rc) {\n\t\t\tsmc_wr_tx_put_slot(link,\n\t\t\t\t\t (struct smc_wr_tx_pend_priv *)pend);\n\t\t\tgoto out_unlock;\n\t\t}\n\t}\n\n\trc = smc_cdc_msg_send(conn, wr_buf, pend);\n\tif (!rc && pflags->urg_data_present) {\n\t\tpflags->urg_data_pending = 0;\n\t\tpflags->urg_data_present = 0;\n\t}\n\nout_unlock:\n\tspin_unlock_bh(&conn->send_lock);\n\tsmc_wr_tx_link_put(link);\n\treturn rc;\n}", "static bool __hci_update_interleaved_scan(struct hci_dev *hdev)\n{\n\t/* Do interleaved scan only if all of the following are true:\n\t * - There is at least one ADV monitor\n\t * - At least one pending LE connection or one device to be scanned for": "static bool __hci_update_interleaved_scan(struct hci_dev *hdev)\n{\n\t/* Do interleaved scan only if all of the following are true:\n\t * - There is at least one ADV monitor\n\t * - At least one pending LE connection or one device to be scanned for\n\t * - Monitor offloading is not supported\n\t * If so, we should alternate between allowlist scan and one without\n\t * any filters to save power.\n\t */\n\tbool use_interleaving = hci_is_adv_monitoring(hdev) &&\n\t\t\t\t!(list_empty(&hdev->pend_le_conns) &&\n\t\t\t\t 
list_empty(&hdev->pend_le_reports)) &&\n\t\t\t\thci_get_adv_monitor_offload_ext(hdev) ==\n\t\t\t\t HCI_ADV_MONITOR_EXT_NONE;\n\tbool is_interleaving = is_interleave_scanning(hdev);\n\n\tif (use_interleaving && !is_interleaving) {\n\t\tstart_interleave_scan(hdev);\n\t\tbt_dev_dbg(hdev, \"starting interleave scan\");\n\t\treturn true;\n\t}\n\n\tif (!use_interleaving && is_interleaving)\n\t\tcancel_interleave_scan(hdev);\n\n\treturn false;\n}", "static int yuvpp_start(struct ia_css_pipe *pipe)\n{\n\tint err = 0;\n\tenum sh_css_pipe_config_override copy_ovrd;\n\tenum ia_css_input_mode yuvpp_pipe_input_mode;": "static int yuvpp_start(struct ia_css_pipe *pipe)\n{\n\tint err = 0;\n\tenum sh_css_pipe_config_override copy_ovrd;\n\tenum ia_css_input_mode yuvpp_pipe_input_mode;\n\tunsigned int thread_id;\n\n\tIA_CSS_ENTER_PRIVATE(\"pipe = %p\", pipe);\n\tif ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {\n\t\tIA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);\n\t\treturn -EINVAL;\n\t}\n\n\tyuvpp_pipe_input_mode = pipe->stream->config.mode;\n\n\tsh_css_metrics_start_frame();\n\n\t/* multi stream video needs mipi buffers */\n\n\terr = send_mipi_frames(pipe);\n\tif (err) {\n\t\tIA_CSS_LEAVE_ERR_PRIVATE(err);\n\t\treturn err;\n\t}\n\n\tia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);\n\tcopy_ovrd = 1 << thread_id;\n\n\tstart_pipe(pipe, copy_ovrd, yuvpp_pipe_input_mode);\n\n\tIA_CSS_LEAVE_ERR_PRIVATE(err);\n\treturn err;\n}", "static int set_vsb(struct drx_demod_instance *demod)\n{\n\tstruct i2c_device_addr *dev_addr = NULL;\n\tint rc;\n\tstruct drx_common_attr *common_attr = NULL;": "static int set_vsb(struct drx_demod_instance *demod)\n{\n\tstruct i2c_device_addr *dev_addr = NULL;\n\tint rc;\n\tstruct drx_common_attr *common_attr = NULL;\n\tstruct drxjscu_cmd cmd_scu;\n\tstruct drxj_data *ext_attr = NULL;\n\tu16 cmd_result = 0;\n\tu16 cmd_param = 0;\n\tstatic const u8 vsb_taps_re[] = {\n\t\tDRXJ_16TO8(-2),\t/* re0 */\n\t\tDRXJ_16TO8(4),\t/* re1 
*/\n\t\tDRXJ_16TO8(1),\t/* re2 */\n\t\tDRXJ_16TO8(-4),\t/* re3 */\n\t\tDRXJ_16TO8(1),\t/* re4 */\n\t\tDRXJ_16TO8(4),\t/* re5 */\n\t\tDRXJ_16TO8(-3),\t/* re6 */\n\t\tDRXJ_16TO8(-3),\t/* re7 */\n\t\tDRXJ_16TO8(6),\t/* re8 */\n\t\tDRXJ_16TO8(1),\t/* re9 */\n\t\tDRXJ_16TO8(-9),\t/* re10 */\n\t\tDRXJ_16TO8(3),\t/* re11 */\n\t\tDRXJ_16TO8(12),\t/* re12 */\n\t\tDRXJ_16TO8(-9),\t/* re13 */\n\t\tDRXJ_16TO8(-15),\t/* re14 */\n\t\tDRXJ_16TO8(17),\t/* re15 */\n\t\tDRXJ_16TO8(19),\t/* re16 */\n\t\tDRXJ_16TO8(-29),\t/* re17 */\n\t\tDRXJ_16TO8(-22),\t/* re18 */\n\t\tDRXJ_16TO8(45),\t/* re19 */\n\t\tDRXJ_16TO8(25),\t/* re20 */\n\t\tDRXJ_16TO8(-70),\t/* re21 */\n\t\tDRXJ_16TO8(-28),\t/* re22 */\n\t\tDRXJ_16TO8(111),\t/* re23 */\n\t\tDRXJ_16TO8(30),\t/* re24 */\n\t\tDRXJ_16TO8(-201),\t/* re25 */\n\t\tDRXJ_16TO8(-31),\t/* re26 */\n\t\tDRXJ_16TO8(629)\t/* re27 */\n\t};\n\n\tdev_addr = demod->my_i2c_dev_addr;\n\tcommon_attr = (struct drx_common_attr *) demod->my_common_attr;\n\text_attr = (struct drxj_data *) demod->my_ext_attr;\n\n\t/* stop all comm_exec */\n\trc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_STOP, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_FS_COMM_EXEC__A, IQM_FS_COMM_EXEC_STOP, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_FD_COMM_EXEC__A, IQM_FD_COMM_EXEC_STOP, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_RC_COMM_EXEC__A, IQM_RC_COMM_EXEC_STOP, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_RT_COMM_EXEC__A, IQM_RT_COMM_EXEC_STOP, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", 
rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_CF_COMM_EXEC__A, IQM_CF_COMM_EXEC_STOP, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* reset demodulator */\n\tcmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB\n\t | SCU_RAM_COMMAND_CMD_DEMOD_RESET;\n\tcmd_scu.parameter_len = 0;\n\tcmd_scu.result_len = 1;\n\tcmd_scu.parameter = NULL;\n\tcmd_scu.result = &cmd_result;\n\trc = scu_command(dev_addr, &cmd_scu);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, IQM_AF_DCF_BYPASS__A, 1, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_FS_ADJ_SEL__A, IQM_FS_ADJ_SEL_B_VSB, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_RC_ADJ_SEL__A, IQM_RC_ADJ_SEL_B_VSB, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\text_attr->iqm_rc_rate_ofs = 0x00AD0D79;\n\trc = drxdap_fasi_write_reg32(dev_addr, IQM_RC_RATE_OFS_LO__A, ext_attr->iqm_rc_rate_ofs, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CFAGC_GAINSHIFT__A, 4, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1TRK__A, 1, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, IQM_RC_CROUT_ENA__A, 1, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_RC_STRETCH__A, 28, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_RT_ACTIVE__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, 
IQM_CF_SYMMETRIC__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_CF_MIDTAP__A, 3, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_CF_OUT_ENA__A, IQM_CF_OUT_ENA_VSB__M, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE__A, 1393, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_CF_SCALE_SH__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_CF_POW_MEAS_LEN__A, 1, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_RE0__A, sizeof(vsb_taps_re), ((u8 *)vsb_taps_re), 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxdap_fasi_write_block(dev_addr, IQM_CF_TAP_IM0__A, sizeof(vsb_taps_re), ((u8 *)vsb_taps_re), 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BNTHRESH__A, 330, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* set higher threshold */\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CLPLASTNUM__A, 90, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* burst detection on */\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_RCA1__A, 0x0042, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* drop thresholds by 1 dB */\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_RCA2__A, 0x0053, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* drop thresholds by 2 dB */\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_EQCTRL__A, 0x1, 0);\n\tif 
(rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* cma on */\n\trc = drxj_dap_write_reg16(dev_addr, SCU_RAM_GPIO__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* GPIO */\n\n\t/* Initialize the FEC Subsystem */\n\trc = drxj_dap_write_reg16(dev_addr, FEC_TOP_ANNEX__A, FEC_TOP_ANNEX_D, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\t{\n\t\tu16 fec_oc_snc_mode = 0;\n\t\trc = drxj_dap_read_reg16(dev_addr, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t\t/* output data even when not locked */\n\t\trc = drxj_dap_write_reg16(dev_addr, FEC_OC_SNC_MODE__A, fec_oc_snc_mode | FEC_OC_SNC_MODE_UNLOCK_ENABLE__M, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t}\n\n\t/* set clip */\n\trc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_LEN__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_AF_CLP_TH__A, 470, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, IQM_AF_SNS_LEN__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_PT__A, 0xD4, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\t/* no transparent, no A&C framing; parity is set in mpegoutput */\n\t{\n\t\tu16 fec_oc_reg_mode = 0;\n\t\trc = drxj_dap_read_reg16(dev_addr, FEC_OC_MODE__A, &fec_oc_reg_mode, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t\trc = drxj_dap_write_reg16(dev_addr, FEC_OC_MODE__A, fec_oc_reg_mode & (~(FEC_OC_MODE_TRANSPARENT__M | FEC_OC_MODE_CLEAR__M | FEC_OC_MODE_RETAIN_FRAMING__M)), 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto 
rw_error;\n\t\t}\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, FEC_DI_TIMEOUT_LO__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* timeout counter for restarting */\n\trc = drxj_dap_write_reg16(dev_addr, FEC_DI_TIMEOUT_HI__A, 3, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, FEC_RS_MODE__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\t/* bypass disabled */\n\t/* initialize RS packet error measurement parameters */\n\trc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PERIOD__A, FEC_RS_MEASUREMENT_PERIOD, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, FEC_RS_MEASUREMENT_PRESCALE__A, FEC_RS_MEASUREMENT_PRESCALE, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* init measurement period of MER/SER */\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_MEASUREMENT_PERIOD__A, VSB_TOP_MEASUREMENT_PERIOD, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxdap_fasi_write_reg32(dev_addr, SCU_RAM_FEC_ACCUM_CW_CORRECTED_LO__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_MEAS_COUNT__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CKGN1TRK__A, 128, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\t/* B-Input to ADC, PGA+filter in standby */\n\tif (!ext_attr->has_lna) {\n\t\trc = drxj_dap_write_reg16(dev_addr, IQM_AF_AMUX__A, 0x02, 0);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto 
rw_error;\n\t\t}\n\t}\n\n\t/* turn on IQMAF. It has to be in front of setAgc**() */\n\trc = set_iqm_af(demod, true);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = adc_synchronization(demod);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = init_agc(demod);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = set_agc_if(demod, &(ext_attr->vsb_if_agc_cfg), false);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = set_agc_rf(demod, &(ext_attr->vsb_rf_agc_cfg), false);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\t{\n\t\t/* TODO fix this, store a struct drxj_cfg_afe_gain structure in struct drxj_data instead\n\t\t of only the gain */\n\t\tstruct drxj_cfg_afe_gain vsb_pga_cfg = { DRX_STANDARD_8VSB, 0 };\n\n\t\tvsb_pga_cfg.gain = ext_attr->vsb_pga_cfg;\n\t\trc = ctrl_set_cfg_afe_gain(demod, &vsb_pga_cfg);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t}\n\trc = ctrl_set_cfg_pre_saw(demod, &(ext_attr->vsb_pre_saw_cfg));\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* Mpeg output has to be in front of FEC active */\n\trc = set_mpegtei_handling(demod);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = bit_reverse_mpeg_output(demod);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = set_mpeg_start_width(demod);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\t{\n\t\t/* TODO: move to set_standard after hardware reset value problem is solved */\n\t\t/* Configure initial MPEG output */\n\t\tstruct drx_cfg_mpeg_output cfg_mpeg_output;\n\n\t\tmemcpy(&cfg_mpeg_output, &common_attr->mpeg_cfg, sizeof(cfg_mpeg_output));\n\t\tcfg_mpeg_output.enable_mpeg_output = true;\n\n\t\trc = ctrl_set_cfg_mpeg_output(demod, 
&cfg_mpeg_output);\n\t\tif (rc != 0) {\n\t\t\tpr_err(\"error %d\\n\", rc);\n\t\t\tgoto rw_error;\n\t\t}\n\t}\n\n\t/* TBD: what parameters should be set */\n\tcmd_param = 0x00;\t/* Default mode AGC on, etc */\n\tcmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB\n\t | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM;\n\tcmd_scu.parameter_len = 1;\n\tcmd_scu.result_len = 1;\n\tcmd_scu.parameter = &cmd_param;\n\tcmd_scu.result = &cmd_result;\n\trc = scu_command(dev_addr, &cmd_scu);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BEAGC_GAINSHIFT__A, 0x0004, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SNRTH_PT__A, 0x00D2, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_SYSSMTRNCTRL__A, VSB_TOP_SYSSMTRNCTRL__PRE | VSB_TOP_SYSSMTRNCTRL_NCOTIMEOUTCNTEN__M, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_BEDETCTRL__A, 0x142, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_LBAGCREFLVL__A, 640, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1ACQ__A, 4, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN1TRK__A, 2, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_TOP_CYGN2TRK__A, 3, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\t/* start demodulator */\n\tcmd_scu.command = SCU_RAM_COMMAND_STANDARD_VSB\n\t | SCU_RAM_COMMAND_CMD_DEMOD_START;\n\tcmd_scu.parameter_len = 0;\n\tcmd_scu.result_len = 1;\n\tcmd_scu.parameter = 
NULL;\n\tcmd_scu.result = &cmd_result;\n\trc = scu_command(dev_addr, &cmd_scu);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\trc = drxj_dap_write_reg16(dev_addr, IQM_COMM_EXEC__A, IQM_COMM_EXEC_ACTIVE, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, VSB_COMM_EXEC__A, VSB_COMM_EXEC_ACTIVE, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\trc = drxj_dap_write_reg16(dev_addr, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE, 0);\n\tif (rc != 0) {\n\t\tpr_err(\"error %d\\n\", rc);\n\t\tgoto rw_error;\n\t}\n\n\treturn 0;\nrw_error:\n\treturn rc;\n}", "static int s2io_enable_msi_x(struct s2io_nic *nic)\n{\n\tstruct XENA_dev_config __iomem *bar0 = nic->bar0;\n\tu64 rx_mat;\n\tu16 msi_control; /* Temp variable */": "static int s2io_enable_msi_x(struct s2io_nic *nic)\n{\n\tstruct XENA_dev_config __iomem *bar0 = nic->bar0;\n\tu64 rx_mat;\n\tu16 msi_control; /* Temp variable */\n\tint ret, i, j, msix_indx = 1;\n\tint size;\n\tstruct stat_block *stats = nic->mac_control.stats_info;\n\tstruct swStat *swstats = &stats->sw_stat;\n\n\tsize = nic->num_entries * sizeof(struct msix_entry);\n\tnic->entries = kzalloc(size, GFP_KERNEL);\n\tif (!nic->entries) {\n\t\tDBG_PRINT(INFO_DBG, \"%s: Memory allocation failed\\n\",\n\t\t\t __func__);\n\t\tswstats->mem_alloc_fail_cnt++;\n\t\treturn -ENOMEM;\n\t}\n\tswstats->mem_allocated += size;\n\n\tsize = nic->num_entries * sizeof(struct s2io_msix_entry);\n\tnic->s2io_entries = kzalloc(size, GFP_KERNEL);\n\tif (!nic->s2io_entries) {\n\t\tDBG_PRINT(INFO_DBG, \"%s: Memory allocation failed\\n\",\n\t\t\t __func__);\n\t\tswstats->mem_alloc_fail_cnt++;\n\t\tkfree(nic->entries);\n\t\tswstats->mem_freed\n\t\t\t+= (nic->num_entries * sizeof(struct msix_entry));\n\t\treturn -ENOMEM;\n\t}\n\tswstats->mem_allocated += size;\n\n\tnic->entries[0].entry = 0;\n\tnic->s2io_entries[0].entry = 0;\n\tnic->s2io_entries[0].in_use = 
MSIX_FLG;\n\tnic->s2io_entries[0].type = MSIX_ALARM_TYPE;\n\tnic->s2io_entries[0].arg = &nic->mac_control.fifos;\n\n\tfor (i = 1; i < nic->num_entries; i++) {\n\t\tnic->entries[i].entry = ((i - 1) * 8) + 1;\n\t\tnic->s2io_entries[i].entry = ((i - 1) * 8) + 1;\n\t\tnic->s2io_entries[i].arg = NULL;\n\t\tnic->s2io_entries[i].in_use = 0;\n\t}\n\n\trx_mat = readq(&bar0->rx_mat);\n\tfor (j = 0; j < nic->config.rx_ring_num; j++) {\n\t\trx_mat |= RX_MAT_SET(j, msix_indx);\n\t\tnic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];\n\t\tnic->s2io_entries[j+1].type = MSIX_RING_TYPE;\n\t\tnic->s2io_entries[j+1].in_use = MSIX_FLG;\n\t\tmsix_indx += 8;\n\t}\n\twriteq(rx_mat, &bar0->rx_mat);\n\treadq(&bar0->rx_mat);\n\n\tret = pci_enable_msix_range(nic->pdev, nic->entries,\n\t\t\t\t nic->num_entries, nic->num_entries);\n\t/* We fail init if error or we get less vectors than min required */\n\tif (ret < 0) {\n\t\tDBG_PRINT(ERR_DBG, \"Enabling MSI-X failed\\n\");\n\t\tkfree(nic->entries);\n\t\tswstats->mem_freed += nic->num_entries *\n\t\t\tsizeof(struct msix_entry);\n\t\tkfree(nic->s2io_entries);\n\t\tswstats->mem_freed += nic->num_entries *\n\t\t\tsizeof(struct s2io_msix_entry);\n\t\tnic->entries = NULL;\n\t\tnic->s2io_entries = NULL;\n\t\treturn -ENOMEM;\n\t}\n\n\t/*\n\t * To enable MSI-X, MSI also needs to be enabled, due to a bug\n\t * in the herc NIC. 
(Temp change, needs to be removed later)\n\t */\n\tpci_read_config_word(nic->pdev, 0x42, &msi_control);\n\tmsi_control |= 0x1; /* Enable MSI */\n\tpci_write_config_word(nic->pdev, 0x42, msi_control);\n\n\treturn 0;\n}", "static void sh_css_setup_queues(void)\n{\n\tconst struct ia_css_fw_info *fw;\n\tunsigned int HIVE_ADDR_host_sp_queues_initialized;\n": "static void sh_css_setup_queues(void)\n{\n\tconst struct ia_css_fw_info *fw;\n\tunsigned int HIVE_ADDR_host_sp_queues_initialized;\n\n\tsh_css_hmm_buffer_record_init();\n\n\tsh_css_event_init_irq_mask();\n\n\tfw = &sh_css_sp_fw;\n\tHIVE_ADDR_host_sp_queues_initialized =\n\t fw->info.sp.host_sp_queues_initialized;\n\n\tia_css_bufq_init();\n\n\t/* set \"host_sp_queues_initialized\" to \"true\" */\n\tsp_dmem_store_uint32(SP0_ID,\n\t\t\t (unsigned int)sp_address_of(host_sp_queues_initialized),\n\t\t\t (uint32_t)(1));\n\tia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, \"sh_css_setup_queues() leave:\\n\");\n}", "static void do_test_pprint(int test_num)\n{\n\tconst struct btf_raw_test *test = &pprint_test_template[test_num];\n\tenum pprint_mapv_kind_t mapv_kind = test->mapv_kind;\n\tLIBBPF_OPTS(bpf_map_create_opts, opts);": "static void do_test_pprint(int test_num)\n{\n\tconst struct btf_raw_test *test = &pprint_test_template[test_num];\n\tenum pprint_mapv_kind_t mapv_kind = test->mapv_kind;\n\tLIBBPF_OPTS(bpf_map_create_opts, opts);\n\tbool ordered_map, lossless_map, percpu_map;\n\tint err, ret, num_cpus, rounded_value_size;\n\tunsigned int key, nr_read_elems;\n\tint map_fd = -1, btf_fd = -1;\n\tunsigned int raw_btf_size;\n\tchar expected_line[255];\n\tFILE *pin_file = NULL;\n\tchar pin_path[255];\n\tsize_t line_len = 0;\n\tchar *line = NULL;\n\tvoid *mapv = NULL;\n\tuint8_t *raw_btf;\n\tssize_t nread;\n\n\tif (!test__start_subtest(test->descr))\n\t\treturn;\n\n\traw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,\n\t\t\t\t test->str_sec, test->str_sec_size,\n\t\t\t\t &raw_btf_size, NULL);\n\n\tif 
(!raw_btf)\n\t\treturn;\n\n\t*btf_log_buf = '\\0';\n\tbtf_fd = load_raw_btf(raw_btf, raw_btf_size);\n\tfree(raw_btf);\n\n\tif (CHECK(btf_fd < 0, \"errno:%d\\n\", errno)) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\topts.btf_fd = btf_fd;\n\topts.btf_key_type_id = test->key_type_id;\n\topts.btf_value_type_id = test->value_type_id;\n\tmap_fd = bpf_map_create(test->map_type, test->map_name,\n\t\t\t\ttest->key_size, test->value_size, test->max_entries, &opts);\n\tif (CHECK(map_fd < 0, \"errno:%d\", errno)) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\tret = snprintf(pin_path, sizeof(pin_path), \"%s/%s\",\n\t\t \"/sys/fs/bpf\", test->map_name);\n\n\tif (CHECK(ret == sizeof(pin_path), \"pin_path %s/%s is too long\",\n\t\t \"/sys/fs/bpf\", test->map_name)) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\terr = bpf_obj_pin(map_fd, pin_path);\n\tif (CHECK(err, \"bpf_obj_pin(%s): errno:%d.\", pin_path, errno))\n\t\tgoto done;\n\n\tpercpu_map = test->percpu_map;\n\tnum_cpus = percpu_map ? bpf_num_possible_cpus() : 1;\n\trounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8);\n\tmapv = calloc(num_cpus, rounded_value_size);\n\tif (CHECK(!mapv, \"mapv allocation failure\")) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\tfor (key = 0; key < test->max_entries; key++) {\n\t\tset_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size);\n\t\tbpf_map_update_elem(map_fd, &key, mapv, 0);\n\t}\n\n\tpin_file = fopen(pin_path, \"r\");\n\tif (CHECK(!pin_file, \"fopen(%s): errno:%d\", pin_path, errno)) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\t/* Skip lines start with '#' */\n\twhile ((nread = getline(&line, &line_len, pin_file)) > 0 &&\n\t *line == '#')\n\t\t;\n\n\tif (CHECK(nread <= 0, \"Unexpected EOF\")) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\tnr_read_elems = 0;\n\tordered_map = test->ordered_map;\n\tlossless_map = test->lossless_map;\n\tdo {\n\t\tssize_t nexpected_line;\n\t\tunsigned int next_key;\n\t\tvoid *cmapv;\n\t\tint cpu;\n\n\t\tnext_key = ordered_map ? 
nr_read_elems : atoi(line);\n\t\tset_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size);\n\t\tcmapv = mapv;\n\n\t\tfor (cpu = 0; cpu < num_cpus; cpu++) {\n\t\t\tif (percpu_map) {\n\t\t\t\t/* for percpu map, the format looks like:\n\t\t\t\t * : {\n\t\t\t\t *\tcpu0: \n\t\t\t\t *\tcpu1: \n\t\t\t\t *\t...\n\t\t\t\t *\tcpun: \n\t\t\t\t * }\n\t\t\t\t *\n\t\t\t\t * let us verify the line containing the key here.\n\t\t\t\t */\n\t\t\t\tif (cpu == 0) {\n\t\t\t\t\tnexpected_line = snprintf(expected_line,\n\t\t\t\t\t\t\t\t sizeof(expected_line),\n\t\t\t\t\t\t\t\t \"%u: {\\n\",\n\t\t\t\t\t\t\t\t next_key);\n\n\t\t\t\t\terr = check_line(expected_line, nexpected_line,\n\t\t\t\t\t\t\t sizeof(expected_line), line);\n\t\t\t\t\tif (err < 0)\n\t\t\t\t\t\tgoto done;\n\t\t\t\t}\n\n\t\t\t\t/* read value@cpu */\n\t\t\t\tnread = getline(&line, &line_len, pin_file);\n\t\t\t\tif (nread < 0)\n\t\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tnexpected_line = get_pprint_expected_line(mapv_kind, expected_line,\n\t\t\t\t\t\t\t\t sizeof(expected_line),\n\t\t\t\t\t\t\t\t percpu_map, next_key,\n\t\t\t\t\t\t\t\t cpu, cmapv);\n\t\t\terr = check_line(expected_line, nexpected_line,\n\t\t\t\t\t sizeof(expected_line), line);\n\t\t\tif (err < 0)\n\t\t\t\tgoto done;\n\n\t\t\tcmapv = cmapv + rounded_value_size;\n\t\t}\n\n\t\tif (percpu_map) {\n\t\t\t/* skip the last bracket for the percpu map */\n\t\t\tnread = getline(&line, &line_len, pin_file);\n\t\t\tif (nread < 0)\n\t\t\t\tbreak;\n\t\t}\n\n\t\tnread = getline(&line, &line_len, pin_file);\n\t} while (++nr_read_elems < test->max_entries && nread > 0);\n\n\tif (lossless_map &&\n\t CHECK(nr_read_elems < test->max_entries,\n\t\t \"Unexpected EOF. 
nr_read_elems:%u test->max_entries:%u\",\n\t\t nr_read_elems, test->max_entries)) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\tif (CHECK(nread > 0, \"Unexpected extra pprint output: %s\", line)) {\n\t\terr = -1;\n\t\tgoto done;\n\t}\n\n\terr = 0;\n\ndone:\n\tif (mapv)\n\t\tfree(mapv);\n\tif (!err)\n\t\tfprintf(stderr, \"OK\");\n\tif (*btf_log_buf && (err || always_log))\n\t\tfprintf(stderr, \"\\n%s\", btf_log_buf);\n\tif (btf_fd >= 0)\n\t\tclose(btf_fd);\n\tif (map_fd >= 0)\n\t\tclose(map_fd);\n\tif (pin_file)\n\t\tfclose(pin_file);\n\tunlink(pin_path);\n\tfree(line);\n}", "static int pm8xxx_calibrate_device(struct pm8xxx_xoadc *adc)\n{\n\tconst struct pm8xxx_chan_info *ch;\n\tu16 read_1250v;\n\tu16 read_0625v;": "static int pm8xxx_calibrate_device(struct pm8xxx_xoadc *adc)\n{\n\tconst struct pm8xxx_chan_info *ch;\n\tu16 read_1250v;\n\tu16 read_0625v;\n\tu16 read_nomux_rsv5;\n\tu16 read_nomux_rsv4;\n\tint ret;\n\n\tadc->graph[VADC_CALIB_ABSOLUTE].dx = VADC_ABSOLUTE_RANGE_UV;\n\tadc->graph[VADC_CALIB_RATIOMETRIC].dx = VADC_RATIOMETRIC_RANGE;\n\n\t/* Common reference channel calibration */\n\tch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_125V);\n\tif (!ch)\n\t\treturn -ENODEV;\n\tret = pm8xxx_read_channel(adc, ch, &read_1250v);\n\tif (ret) {\n\t\tdev_err(adc->dev, \"could not read 1.25V reference channel\\n\");\n\t\treturn -ENODEV;\n\t}\n\tch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_INTERNAL);\n\tif (!ch)\n\t\treturn -ENODEV;\n\tret = pm8xxx_read_channel(adc, ch, &read_0625v);\n\tif (ret) {\n\t\tdev_err(adc->dev, \"could not read 0.625V reference channel\\n\");\n\t\treturn -ENODEV;\n\t}\n\tif (read_1250v == read_0625v) {\n\t\tdev_err(adc->dev, \"read same ADC code for 1.25V and 0.625V\\n\");\n\t\treturn -ENODEV;\n\t}\n\n\tadc->graph[VADC_CALIB_ABSOLUTE].dy = read_1250v - read_0625v;\n\tadc->graph[VADC_CALIB_ABSOLUTE].gnd = read_0625v;\n\n\tdev_info(adc->dev, \"absolute calibration dx = %d uV, dy = %d units\\n\",\n\t\t VADC_ABSOLUTE_RANGE_UV, 
adc->graph[VADC_CALIB_ABSOLUTE].dy);\n\n\t/* Ratiometric calibration */\n\tch = pm8xxx_get_channel(adc, PM8XXX_CHANNEL_MUXOFF);\n\tif (!ch)\n\t\treturn -ENODEV;\n\tret = pm8xxx_read_channel_rsv(adc, ch, AMUX_RSV5,\n\t\t\t\t &read_nomux_rsv5, true);\n\tif (ret) {\n\t\tdev_err(adc->dev, \"could not read MUXOFF reference channel\\n\");\n\t\treturn -ENODEV;\n\t}\n\tret = pm8xxx_read_channel_rsv(adc, ch, AMUX_RSV4,\n\t\t\t\t &read_nomux_rsv4, true);\n\tif (ret) {\n\t\tdev_err(adc->dev, \"could not read MUXOFF reference channel\\n\");\n\t\treturn -ENODEV;\n\t}\n\tadc->graph[VADC_CALIB_RATIOMETRIC].dy =\n\t\tread_nomux_rsv5 - read_nomux_rsv4;\n\tadc->graph[VADC_CALIB_RATIOMETRIC].gnd = read_nomux_rsv4;\n\n\tdev_info(adc->dev, \"ratiometric calibration dx = %d, dy = %d units\\n\",\n\t\t VADC_RATIOMETRIC_RANGE,\n\t\t adc->graph[VADC_CALIB_RATIOMETRIC].dy);\n\n\treturn 0;\n}", "static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev)\n{\n\tstatic const struct sensor_val cfg[] = {\n\t\t/* restart&reset, chip enable, reserved */\n\t\t{ 0x00d, 0x000b }, { 0x00d, 0x0008 }, { 0x035, 0x0022 },": "static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev)\n{\n\tstatic const struct sensor_val cfg[] = {\n\t\t/* restart&reset, chip enable, reserved */\n\t\t{ 0x00d, 0x000b }, { 0x00d, 0x0008 }, { 0x035, 0x0022 },\n\t\t/* mode ctl: AWB on, AE both, clip aper corr, defect corr, AE */\n\t\t{ 0x106, 0x700e },\n\n\t\t{ 0x2dd, 0x18e0 }, /* B-R thresholds, */\n\n\t\t/* AWB */\n\t\t{ 0x21f, 0x0180 }, /* Cb and Cr limits */\n\t\t{ 0x220, 0xc814 }, { 0x221, 0x8080 }, /* lum limits, RGB gain */\n\t\t{ 0x222, 0xa078 }, { 0x223, 0xa078 }, /* R, B limit */\n\t\t{ 0x224, 0x5f20 }, { 0x228, 0xea02 }, /* mtx adj lim, adv ctl */\n\t\t{ 0x229, 0x867a }, /* wide gates */\n\n\t\t/* Color correction */\n\t\t/* imager gains base, delta, delta signs */\n\t\t{ 0x25e, 0x594c }, { 0x25f, 0x4d51 }, { 0x260, 0x0002 },\n\t\t/* AWB adv ctl 2, gain offs */\n\t\t{ 0x2ef, 0x0008 }, { 0x2f2, 0x0000 
},\n\t\t/* base matrix signs, scale K1-5, K6-9 */\n\t\t{ 0x202, 0x00ee }, { 0x203, 0x3923 }, { 0x204, 0x0724 },\n\t\t/* base matrix coef */\n\t\t{ 0x209, 0x00cd }, { 0x20a, 0x0093 }, { 0x20b, 0x0004 },/*K1-3*/\n\t\t{ 0x20c, 0x005c }, { 0x20d, 0x00d9 }, { 0x20e, 0x0053 },/*K4-6*/\n\t\t{ 0x20f, 0x0008 }, { 0x210, 0x0091 }, { 0x211, 0x00cf },/*K7-9*/\n\t\t{ 0x215, 0x0000 }, /* delta mtx signs */\n\t\t/* delta matrix coef */\n\t\t{ 0x216, 0x0000 }, { 0x217, 0x0000 }, { 0x218, 0x0000 },/*D1-3*/\n\t\t{ 0x219, 0x0000 }, { 0x21a, 0x0000 }, { 0x21b, 0x0000 },/*D4-6*/\n\t\t{ 0x21c, 0x0000 }, { 0x21d, 0x0000 }, { 0x21e, 0x0000 },/*D7-9*/\n\t\t/* enable & disable manual WB to apply color corr. settings */\n\t\t{ 0x106, 0xf00e }, { 0x106, 0x700e },\n\n\t\t/* Lens shading correction */\n\t\t{ 0x180, 0x0007 }, /* control */\n\t\t/* vertical knee 0, 2+1, 4+3 */\n\t\t{ 0x181, 0xde13 }, { 0x182, 0xebe2 }, { 0x183, 0x00f6 }, /* R */\n\t\t{ 0x184, 0xe114 }, { 0x185, 0xeadd }, { 0x186, 0xfdf6 }, /* G */\n\t\t{ 0x187, 0xe511 }, { 0x188, 0xede6 }, { 0x189, 0xfbf7 }, /* B */\n\t\t/* horizontal knee 0, 2+1, 4+3, 5 */\n\t\t{ 0x18a, 0xd613 }, { 0x18b, 0xedec }, /* R .. */\n\t\t{ 0x18c, 0xf9f2 }, { 0x18d, 0x0000 }, /* .. R */\n\t\t{ 0x18e, 0xd815 }, { 0x18f, 0xe9ea }, /* G .. */\n\t\t{ 0x190, 0xf9f1 }, { 0x191, 0x0002 }, /* .. G */\n\t\t{ 0x192, 0xde10 }, { 0x193, 0xefef }, /* B .. */\n\t\t{ 0x194, 0xfbf4 }, { 0x195, 0x0002 }, /* .. 
B */\n\t\t/* vertical knee 6+5, 8+7 */\n\t\t{ 0x1b6, 0x0e06 }, { 0x1b7, 0x2713 }, /* R */\n\t\t{ 0x1b8, 0x1106 }, { 0x1b9, 0x2713 }, /* G */\n\t\t{ 0x1ba, 0x0c03 }, { 0x1bb, 0x2a0f }, /* B */\n\t\t/* horizontal knee 7+6, 9+8, 10 */\n\t\t{ 0x1bc, 0x1208 }, { 0x1bd, 0x1a16 }, { 0x1be, 0x0022 }, /* R */\n\t\t{ 0x1bf, 0x150a }, { 0x1c0, 0x1c1a }, { 0x1c1, 0x002d }, /* G */\n\t\t{ 0x1c2, 0x1109 }, { 0x1c3, 0x1414 }, { 0x1c4, 0x002a }, /* B */\n\t\t{ 0x106, 0x740e }, /* enable lens shading correction */\n\n\t\t/* Gamma correction - context A */\n\t\t{ 0x153, 0x0b03 }, { 0x154, 0x4722 }, { 0x155, 0xac82 },\n\t\t{ 0x156, 0xdac7 }, { 0x157, 0xf5e9 }, { 0x158, 0xff00 },\n\t\t/* Gamma correction - context B */\n\t\t{ 0x1dc, 0x0b03 }, { 0x1dd, 0x4722 }, { 0x1de, 0xac82 },\n\t\t{ 0x1df, 0xdac7 }, { 0x1e0, 0xf5e9 }, { 0x1e1, 0xff00 },\n\n\t\t/* output format: RGB, invert output pixclock, output bayer */\n\t\t{ 0x13a, 0x4300 }, { 0x19b, 0x4300 }, /* for context A, B */\n\t\t{ 0x108, 0x0180 }, /* format control - enable bayer row flip */\n\n\t\t{ 0x22f, 0xd100 }, { 0x29c, 0xd100 }, /* AE A, B */\n\n\t\t/* default prg conf, prg ctl - by 0x2d2, prg advance - PA1 */\n\t\t{ 0x2d2, 0x0000 }, { 0x2cc, 0x0004 }, { 0x2cb, 0x0001 },\n\n\t\t{ 0x22e, 0x0c3c }, { 0x267, 0x1010 }, /* AE tgt ctl, gain lim */\n\n\t\t/* PLL */\n\t\t{ 0x065, 0xa000 }, /* clk ctl - enable PLL (clear bit 14) */\n\t\t{ 0x066, 0x2003 }, { 0x067, 0x0501 }, /* PLL M=128, N=3, P=1 */\n\t\t{ 0x065, 0x2000 }, /* disable PLL bypass (clear bit 15) */\n\n\t\t{ 0x005, 0x01b8 }, { 0x007, 0x00d8 }, /* horiz blanking B, A */\n\n\t\t/* AE line size, shutter delay limit */\n\t\t{ 0x239, 0x06c0 }, { 0x23b, 0x040e }, /* for context A */\n\t\t{ 0x23a, 0x06c0 }, { 0x23c, 0x0564 }, /* for context B */\n\t\t/* shutter width basis 60Hz, 50Hz */\n\t\t{ 0x257, 0x0208 }, { 0x258, 0x0271 }, /* for context A */\n\t\t{ 0x259, 0x0209 }, { 0x25a, 0x0271 }, /* for context B */\n\n\t\t{ 0x25c, 0x120d }, { 0x25d, 0x1712 }, /* flicker 60Hz, 50Hz 
*/\n\t\t{ 0x264, 0x5e1c }, /* reserved */\n\t\t/* flicker, AE gain limits, gain zone limits */\n\t\t{ 0x25b, 0x0003 }, { 0x236, 0x7810 }, { 0x237, 0x8304 },\n\n\t\t{ 0x008, 0x0021 }, /* vert blanking A */\n\t};\n\tint i;\n\tu16 width, height;\n\n\tfor (i = 0; i < ARRAY_SIZE(cfg); i++)\n\t\tsensor_write(gspca_dev, cfg[i].reg, cfg[i].val);\n\n\t/* set output size */\n\twidth = gspca_dev->pixfmt.width;\n\theight = gspca_dev->pixfmt.height;\n\tif (width <= 640 && height <= 512) { /* context A (half readout speed)*/\n\t\tsensor_write(gspca_dev, 0x1a7, width);\n\t\tsensor_write(gspca_dev, 0x1aa, height);\n\t\t/* set read mode context A */\n\t\tsensor_write(gspca_dev, 0x0c8, 0x0000);\n\t\t/* set resize, read mode, vblank, hblank context A */\n\t\tsensor_write(gspca_dev, 0x2c8, 0x0000);\n\t} else { /* context B (full readout speed) */\n\t\tsensor_write(gspca_dev, 0x1a1, width);\n\t\tsensor_write(gspca_dev, 0x1a4, height);\n\t\t/* set read mode context B */\n\t\tsensor_write(gspca_dev, 0x0c8, 0x0008);\n\t\t/* set resize, read mode, vblank, hblank context B */\n\t\tsensor_write(gspca_dev, 0x2c8, 0x040b);\n\t}\n}", "static int patch_alc850(struct snd_ac97 *ac97)\n{\n\tac97->build_ops = &patch_alc850_ops;\n\n\tac97->spec.dev_flags = 0; /* for IEC958 playback route - ALC655 compatible */": "static int patch_alc850(struct snd_ac97 *ac97)\n{\n\tac97->build_ops = &patch_alc850_ops;\n\n\tac97->spec.dev_flags = 0; /* for IEC958 playback route - ALC655 compatible */\n\tac97->flags |= AC97_HAS_8CH;\n\n\t/* assume only page 0 for writing cache */\n\tsnd_ac97_update_bits(ac97, AC97_INT_PAGING, AC97_PAGE_MASK, AC97_PAGE_VENDOR);\n\n\t/* adjust default values */\n\t/* set default: spdif-in enabled,\n\t spdif-in monitor off, spdif-in PCM off\n\t center on mic off, surround on line-in off\n\t duplicate front off\n\t NB default bit 10=0 = Aux is Capture, not Back Surround\n\t*/\n\tsnd_ac97_write_cache(ac97, AC97_ALC650_MULTICH, 1<<15);\n\t/* SURR_OUT: on, Surr 1kOhm: on, Surr Amp: off, Front 
1kOhm: off\n\t * Front Amp: on, Vref: enable, Center 1kOhm: on, Mix: on\n\t */\n\tsnd_ac97_write_cache(ac97, 0x7a, (1<<1)|(1<<4)|(0<<5)|(1<<6)|\n\t\t\t (1<<7)|(0<<12)|(1<<13)|(0<<14));\n\t/* detection UIO2,3: all path floating, UIO3: MIC, Vref2: disable,\n\t * UIO1: FRONT, Vref3: disable, UIO3: LINE, Front-Mic: mute\n\t */\n\tsnd_ac97_write_cache(ac97, 0x76, (0<<0)|(0<<2)|(1<<4)|(1<<7)|(2<<8)|\n\t\t\t (1<<11)|(0<<12)|(1<<15));\n\n\t/* full DAC volume */\n\tsnd_ac97_write_cache(ac97, AC97_ALC650_SURR_DAC_VOL, 0x0808);\n\tsnd_ac97_write_cache(ac97, AC97_ALC650_LFE_DAC_VOL, 0x0808);\n\treturn 0;\n}", "static int __init stnic_probe(void)\n{\n struct net_device *dev;\n struct ei_device *ei_local;\n int err;": "static int __init stnic_probe(void)\n{\n struct net_device *dev;\n struct ei_device *ei_local;\n int err;\n\n /* If we are not running on a SolutionEngine, give up now */\n if (! MACH_SE)\n return -ENODEV;\n\n /* New style probing API */\n dev = alloc_ei_netdev();\n if (!dev)\n\treturn -ENOMEM;\n\n#ifdef CONFIG_SH_STANDARD_BIOS\n sh_bios_get_node_addr (stnic_eadr);\n#endif\n eth_hw_addr_set(dev, stnic_eadr);\n\n /* Set the base address to point to the NIC, not the \"real\" base! */\n dev->base_addr = 0x1000;\n dev->irq = IRQ_STNIC;\n dev->netdev_ops = &ei_netdev_ops;\n\n /* Snarf the interrupt now. There's no point in waiting since we cannot\n share and the board will usually be enabled. 
*/\n err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev);\n if (err) {\n\tnetdev_emerg(dev, \" unable to get IRQ %d.\\n\", dev->irq);\n\tfree_netdev(dev);\n\treturn err;\n }\n\n ei_status.name = dev->name;\n ei_status.word16 = 1;\n#ifdef __LITTLE_ENDIAN__\n ei_status.bigendian = 0;\n#else\n ei_status.bigendian = 1;\n#endif\n ei_status.tx_start_page = START_PG;\n ei_status.rx_start_page = START_PG + TX_PAGES;\n ei_status.stop_page = STOP_PG;\n\n ei_status.reset_8390 = &stnic_reset;\n ei_status.get_8390_hdr = &stnic_get_hdr;\n ei_status.block_input = &stnic_block_input;\n ei_status.block_output = &stnic_block_output;\n\n stnic_init (dev);\n ei_local = netdev_priv(dev);\n ei_local->msg_enable = stnic_msg_enable;\n\n err = register_netdev(dev);\n if (err) {\n free_irq(dev->irq, dev);\n free_netdev(dev);\n return err;\n }\n stnic_dev = dev;\n\n netdev_info(dev, \"NS ST-NIC 83902A\\n\");\n\n return 0;\n}", "static void tr_static_init(void)\n{\n static int static_init_done;\n int n; /* iterates over tree elements */\n int bits; /* bit counter */": "static void tr_static_init(void)\n{\n static int static_init_done;\n int n; /* iterates over tree elements */\n int bits; /* bit counter */\n int length; /* length value */\n int code; /* code value */\n int dist; /* distance index */\n ush bl_count[MAX_BITS+1];\n /* number of codes at each bit length for an optimal tree */\n\n if (static_init_done) return;\n\n /* Initialize the mapping length (0..255) -> length code (0..28) */\n length = 0;\n for (code = 0; code < LENGTH_CODES-1; code++) {\n base_length[code] = length;\n for (n = 0; n < (1< dist code (0..29) */\n dist = 0;\n for (code = 0 ; code < 16; code++) {\n base_dist[code] = dist;\n for (n = 0; n < (1<>= 7; /* from now on, all distances are divided by 128 */\n for ( ; code < D_CODES; code++) {\n base_dist[code] = dist << 7;\n for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {\n dist_code[256 + dist++] = (uch)code;\n }\n }\n Assert (dist == 256, \"tr_static_init: 
256+dist != 512\");\n\n /* Construct the codes of the static literal tree */\n for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;\n n = 0;\n while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;\n while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;\n while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;\n while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;\n /* Codes 286 and 287 do not exist, but we must include them in the\n * tree construction to get a canonical Huffman tree (longest code\n * all ones)\n */\n gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);\n\n /* The static distance tree is trivial: */\n for (n = 0; n < D_CODES; n++) {\n static_dtree[n].Len = 5;\n static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5);\n }\n static_init_done = 1;\n}", "static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)\n{\n\t/* Do interleaved scan only if all of the following are true:\n\t * - There is at least one ADV monitor\n\t * - At least one pending LE connection or one device to be scanned for": "static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)\n{\n\t/* Do interleaved scan only if all of the following are true:\n\t * - There is at least one ADV monitor\n\t * - At least one pending LE connection or one device to be scanned for\n\t * - Monitor offloading is not supported\n\t * If so, we should alternate between allowlist scan and one without\n\t * any filters to save power.\n\t */\n\tbool use_interleaving = hci_is_adv_monitoring(hdev) &&\n\t\t\t\t!(list_empty(&hdev->pend_le_conns) &&\n\t\t\t\t list_empty(&hdev->pend_le_reports)) &&\n\t\t\t\thci_get_adv_monitor_offload_ext(hdev) ==\n\t\t\t\t HCI_ADV_MONITOR_EXT_NONE;\n\tbool is_interleaving = is_interleave_scanning(hdev);\n\n\tif (use_interleaving && !is_interleaving) {\n\t\thci_start_interleave_scan(hdev);\n\t\tbt_dev_dbg(hdev, \"starting interleave scan\");\n\t\treturn true;\n\t}\n\n\tif (!use_interleaving && 
is_interleaving)\n\t\tcancel_interleave_scan(hdev);\n\n\treturn false;\n}", "static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)\n{\n\t/*\n\t * cpusets delegate hotplug operations to a worker to \"solve\" the\n\t * lock order problems. Wait for the worker, but only if tasks are": "static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)\n{\n\t/*\n\t * cpusets delegate hotplug operations to a worker to \"solve\" the\n\t * lock order problems. Wait for the worker, but only if tasks are\n\t * _not_ frozen (suspend, hibernate) as that would wait forever.\n\t *\n\t * The wait is required because otherwise the hotplug operation\n\t * returns with inconsistent state, which could even be observed in\n\t * user space when a new CPU is brought up. The CPU plug uevent\n\t * would be delivered and user space reacting on it would fail to\n\t * move tasks to the newly plugged CPU up to the point where the\n\t * work has finished because up to that point the newly plugged CPU\n\t * is not assignable in cpusets/cgroups. On unplug that's not\n\t * necessarily a visible issue, but it is still inconsistent state,\n\t * which is the real problem which needs to be \"fixed\". 
This can't\n\t * prevent the transient state between scheduling the work and\n\t * returning from waiting for it.\n\t */\n\tif (!tasks_frozen)\n\t\tcpuset_wait_for_hotplug();\n}", "static void die_usage(void)\n{\n\tfprintf(stderr, \"Usage: mptcp_connect [-6] [-c cmsg] [-i file] [-I num] [-j] [-l] \"\n\t\t\"[-m mode] [-M mark] [-o option] [-p port] [-P mode] [-j] [-l] [-r num] \"\n\t\t\"[-s MPTCP|TCP] [-S num] [-r num] [-t num] [-T num] [-u] [-w sec] connect_address\\n\");": "static void die_usage(void)\n{\n\tfprintf(stderr, \"Usage: mptcp_connect [-6] [-c cmsg] [-i file] [-I num] [-j] [-l] \"\n\t\t\"[-m mode] [-M mark] [-o option] [-p port] [-P mode] [-j] [-l] [-r num] \"\n\t\t\"[-s MPTCP|TCP] [-S num] [-r num] [-t num] [-T num] [-u] [-w sec] connect_address\\n\");\n\tfprintf(stderr, \"\\t-6 use ipv6\\n\");\n\tfprintf(stderr, \"\\t-c cmsg -- test cmsg type \\n\");\n\tfprintf(stderr, \"\\t-i file -- read the data to send from the given file instead of stdin\");\n\tfprintf(stderr, \"\\t-I num -- repeat the transfer 'num' times. In listen mode accepts num \"\n\t\t\"incoming connections, in client mode, disconnect and reconnect to the server\\n\");\n\tfprintf(stderr, \"\\t-j -- add additional sleep at connection start and tear down \"\n\t\t\"-- for MPJ tests\\n\");\n\tfprintf(stderr, \"\\t-l -- listens mode, accepts incoming connection\\n\");\n\tfprintf(stderr, \"\\t-m [poll|mmap|sendfile] -- use poll(default)/mmap+write/sendfile\\n\");\n\tfprintf(stderr, \"\\t-M mark -- set socket packet mark\\n\");\n\tfprintf(stderr, \"\\t-o option -- test sockopt