content
stringlengths 19
48.2k
|
---|
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* Implements libFuzzer's custom mutator interface.
*/
#pragma once
#include <test/tools/ossfuzz/SolidityGenerator.h>
#include <memory>
namespace solidity::test::fuzzer::mutator
{
/// Bridges libFuzzer's custom-mutator callback to the Solidity test
/// program generator: generate() produces a program and copies it into
/// the libFuzzer-owned buffer.
struct SolidityCustomMutatorInterface
{
/// @param _data    libFuzzer-owned buffer that receives the mutant
/// @param _size    size of the current input in the buffer
/// @param _maxSize maximum number of bytes the mutant may occupy
/// @param _seed    libFuzzer-provided seed for the generator
SolidityCustomMutatorInterface(uint8_t* _data, size_t _size, size_t _maxSize, unsigned _seed);
/// Generates Solidity test program, copies it into buffer
/// provided by libFuzzer and @returns size of the test program.
size_t generate();
/// Raw pointer to libFuzzer provided input
uint8_t* data;
/// Size of libFuzzer provided input
size_t size;
/// Maximum length of mutant specified by libFuzzer
size_t maxMutantSize;
/// Solidity generator handle
std::shared_ptr<SolidityGenerator> generator;
};
}
|
/**
 * Reverse byte order of a 16 bit word.
 *
 * @param n  value whose two bytes are exchanged
 * @return   n with its high and low bytes swapped
 */
static inline uint16_t
util_bswap16(uint16_t n)
{
	return (uint16_t)((n << 8) | (n >> 8));
}
#include<stdio.h>
/*
 * "Taxi" grouping problem: n groups of 1..4 people must be packed into
 * cars seating 4, keeping each group together; print the minimum number
 * of cars (no trailing newline, matching the original output).
 *
 * Fixes over the original: scanf results are checked, and n is no longer
 * used unvalidated as a VLA size (negative or huge n was undefined
 * behavior); group sizes outside 1..4 are ignored instead of silently
 * skewing the counts.
 */
int main(void)
{
    int n;
    if (scanf("%d", &n) != 1 || n <= 0)
        return 0;

    /* cnt[s] = number of groups of size s (1..4) */
    int cnt[5] = {0};
    for (int i = 0; i < n; i++) {
        int s;
        if (scanf("%d", &s) != 1)
            return 0;
        if (s >= 1 && s <= 4)
            cnt[s]++;
    }

    int cars = cnt[4];        /* each 4-group fills a car exactly */
    cars += cnt[3];           /* each 3-group takes a car ... */
    /* ... and can share it with one 1-group */
    int ones = cnt[1] > cnt[3] ? cnt[1] - cnt[3] : 0;
    cars += cnt[2] / 2;       /* two 2-groups share a car */
    if (cnt[2] % 2) {
        /* one unpaired 2-group: its car has 2 free seats for 1-groups */
        cars++;
        ones -= ones < 2 ? ones : 2;
    }
    cars += (ones + 3) / 4;   /* remaining 1-groups, four per car */

    printf("%d", cars);
    return 0;
}
|
// THIS CODE IS FOR TESTING PURPOSES ONLY. DO NOT USE IN PRODUCTION ENVIRONMENTS. REPLACE WITH A PROPER IMPLEMENTATION BEFORE USE
/* Fill key_buf with a fixed, deterministic placeholder "root of trust"
 * key (byte i = i).  Returns 0 on success, -1 on a NULL buffer or one
 * smaller than the device key size.  Testing stub only — see #warning. */
int8_t mbed_cloud_client_get_rot_128bit(uint8_t *key_buf, uint32_t length)
{
#warning "You are using insecure Root Of Trust implementation, DO NOT USE IN PRODUCTION ENVIRONMENTS. REPLACE WITH A PROPER IMPLEMENTATION BEFORE USE"
    if (key_buf == NULL || length < DEVICE_KEY_SIZE_IN_BYTES)
    {
        return -1;
    }
    uint8_t pos = 0;
    while (pos < DEVICE_KEY_SIZE_IN_BYTES)
    {
        key_buf[pos] = pos;
        pos++;
    }
    return 0;
}
/* Block or unblock SIGWINCH.  Passing TRUE unblocks the signal so any
 * pending ones can be dealt with; passing FALSE blocks it. */
void allow_sigwinch(bool allow)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGWINCH);

	if (allow)
		sigprocmask(SIG_UNBLOCK, &mask, NULL);
	else
		sigprocmask(SIG_BLOCK, &mask, NULL);
}
// Auton Selector Written by Owen Oertell
/* Build the autonomous-routine selection screen: an LVGL button matrix
 * with the six routine choices (wired to setAuton) plus the team logo.
 * Fix: the function is declared int but previously fell off the end
 * without a return statement, which is undefined behavior if the caller
 * reads the result; it now returns 0. */
int AutonSelector() {
    lv_theme_t * th = lv_theme_alien_init(65, NULL);
    lv_theme_set_current(th);
    lv_obj_t * scr = lv_page_create(NULL, NULL);
    lv_scr_load(scr);
    lv_obj_t * label = lv_label_create(lv_scr_act(), NULL);
    lv_label_set_text(label, "");
    /* Two rows of auton choices; "" terminates the map. */
    static const char * btnm_str[] = {"Blue P", "Skills", "Red P", "\n", "Blue UP", "EXP", "Red UP", ""};
    lv_obj_t * btnm = lv_btnm_create(lv_scr_act(), NULL);
    lv_obj_set_size(btnm, 280, 180);
    lv_btnm_set_map(btnm, btnm_str);
    lv_obj_align(btnm, lv_scr_act(), LV_ALIGN_CENTER, -70, 0);
    lv_btnm_set_toggle(btnm, true, 1);
    lv_btnm_set_action(btnm, setAuton);
    lv_obj_t * img1 = lv_img_create(lv_scr_act(), NULL);
    lv_img_set_src(img1,&DogLogo);
    lv_img_set_auto_size(img1, true);
    lv_obj_align(img1, lv_scr_act(), LV_ALIGN_CENTER, 160,0);
    return 0;
}
/*
 * pqCommandQueueAdvance
 *		Pop the front entry off the connection's command queue, once all
 *		server results pertaining to it have been received, and recycle it.
 */
void
pqCommandQueueAdvance(PGconn *conn)
{
	PGcmdQueueEntry *entry = conn->cmd_queue_head;

	if (entry == NULL)
		return;

	conn->cmd_queue_head = entry->next;
	entry->next = NULL;
	pqRecycleCmdQueueEntry(conn, entry);
}
#include<stdio.h>
/*
 * Count the divisors of b by trial division up to sqrt(b).
 *
 * Fixes over the original: the scanf result is checked, and the loop
 * bound uses i <= b / i instead of i * i <= b — for b close to
 * LLONG_MAX the product i*i overflowed, which is undefined behavior
 * for signed integers.
 */
int main(void)
{
    long long b, count = 0;
    if (scanf("%lld", &b) != 1 || b < 1) {
        /* no positive input: zero divisors, same as the old loop */
        printf("0\n");
        return 0;
    }
    for (long long i = 1; i <= b / i; i++) {
        if (b % i == 0) {
            count++;          /* divisor i */
            if (i != b / i)
                count++;      /* its cofactor b / i */
        }
    }
    printf("%lld\n", count);
    return 0;
}
|
/*
*************************************************************************
* FunctionName: hdr_movie_adjust_process;
* Description : the function that adjust ae arithmatic and atr curve of sensor
* Input : ae result : the struct include long shutter ,short shutter long gain and short gain.
* Output : NA;
* ReturnValue : NA;
* Other :
**************************************************************************
*/
/* Tune the HDR-movie AE result: choose the ATR switch point and
 * hysteresis interval from the AWB R/B ratio (indoor vs. outdoor),
 * toggle the sensor's ATR block around that point, and clamp the WB
 * limit / AE saturation while ATR is off. */
void hdr_movie_adjust_process(hdr_ae_algo_result *ae_result)
{
mini_camera_sensor *sensor = mini_this_ispdata->sensor;
u16 N_gain = ae_result->N_gain;
u16 N_gainBeforeAjust = ae_result->N_gainBeforeAjust;
u16 atr_switch_gain = hdr_movie_ae_ctrl->hdr_atr_switch_gain;
u16 gain_interval = 0;
u32 wb_lmt = ae_result->N_wb_lmt;
u32 ae_sat = ae_result->N_ae_sat;
/* R/B ratio from AWB is used to distinguish indoor from outdoor light. */
u16 rbratio = ispv1_awb_dynamic_ccm_gain();
if(rbratio < HDR_RBRATIO_THESHOLD)
{
atr_switch_gain = HDR_ATR_SWITCH_INDOOR;
gain_interval = HDR_ATR_INTERVAL_INDOOR;
}
else
{
atr_switch_gain = HDR_ATR_SWITCH_OUTDOOR;
gain_interval = HDR_ATR_INTERVAL_OUTDOOR;
}
#ifdef HDR_MOVIE_DEBUG_MODE
/* Debug builds take the AE method and thresholds from the sensor
 * descriptor instead of the compile-time constants above. */
if(N_gainBeforeAjust > sensor->sensor_hdr_movie.ae_arithmatic_switch_gain)
{
hdr_movie_ae_ctrl->ae_arith_method = HDR_AE_AVERAGE_MATH;
}
else
{
hdr_movie_ae_ctrl->ae_arith_method = HDR_AE_WEIGHT_MATH;
}
if(rbratio < HDR_RBRATIO_THESHOLD)
{
atr_switch_gain = sensor->sensor_hdr_movie.gain_switch2;
gain_interval = sensor->sensor_hdr_movie.gain_interval2;
}
else
{
atr_switch_gain = sensor->sensor_hdr_movie.gain_switch;
gain_interval = sensor->sensor_hdr_movie.gain_interval;
}
#endif
/* Hysteresis band of +/- gain_interval around atr_switch_gain avoids
 * rapid ATR on/off toggling when the gain hovers near the threshold. */
if(N_gainBeforeAjust >=(atr_switch_gain + gain_interval))
{
if(ATR_ON == sensor->sensor_hdr_movie.hdrInfo.atr_on)
{
ispv1_hdr_set_ATR_switch(0);
if(sensor->sensor_hdr_movie.hdrInfo.atr_over_expo_on)
sensor->sensor_hdr_movie.over_exposure_adjust(0,NULL);
}
/* With ATR off, use fixed limit/saturation and mirror the normal
 * exposure into the short-exposure fields. */
wb_lmt = 1023;
ae_sat = 960;
ae_result->N_short_gain = ae_result->N_gain;
ae_result->N_short_shuter = ae_result->N_shuter;
}else if(N_gainBeforeAjust < (atr_switch_gain - gain_interval))
{
if( ATR_OFF == sensor->sensor_hdr_movie.hdrInfo.atr_on)
{
ispv1_hdr_set_ATR_switch(1);
}
}
if(_IS_DEBUG_AE)
{
print_info("N_gainBeforeAjust =%d,N_gain: %d, atr_switch_gain: %d,gain_interval: %d atr_on = %d ae_arithmatic_switch_gain = %d,rbratio=%d",N_gainBeforeAjust, N_gain,atr_switch_gain, gain_interval,sensor->sensor_hdr_movie.hdrInfo.atr_on,hdr_movie_ae_ctrl->ae_arith_method,rbratio);
print_info("ae_sat = %08x,wb_lmt = %08x", ae_sat,wb_lmt);
}
/* Push the (possibly clamped) limits to the sensor if it has a hook. */
if(sensor->sensor_hdr_movie.set_lmt_sat)
{
sensor->sensor_hdr_movie.set_lmt_sat(wb_lmt,ae_sat);
}
ae_result->N_wb_lmt = wb_lmt;
ae_result->N_ae_sat = ae_sat;
}
/*
* Special for sending SET commands that change GUC variables, so they go to all
* gangs, both reader and writer
*
* Can not dispatch SET commands to busy reader gangs (allocated by cursors) directly because another
* command is already in progress.
* Cursors only allocate reader gangs, so primary writer and idle reader gangs can be dispatched to.
*/
void
CdbDispatchSetCommand(const char *strCommand, bool cancelOnError)
{
CdbDispatcherState *ds;
DispatchCommandQueryParms *pQueryParms;
Gang *primaryGang;
char *queryText;
int queryTextLength;
ListCell *le;
ErrorData *qeError = NULL;
elog((Debug_print_full_dtm ? LOG : DEBUG5),
"CdbDispatchSetCommand for command = '%s'",
strCommand);
pQueryParms = cdbdisp_buildCommandQueryParms(strCommand, DF_NONE);
ds = cdbdisp_makeDispatcherState(false);
queryText = buildGpQueryString(pQueryParms, &queryTextLength);
/* The writer gang always gets the SET; idle reader gangs are added so
 * the GUC change reaches them too.  Busy reader gangs (held by cursors)
 * cannot be dispatched to -- see the function header comment. */
primaryGang = AllocateGang(ds, GANGTYPE_PRIMARY_WRITER, cdbcomponent_getCdbComponentsList());
AllocateGang(ds, GANGTYPE_PRIMARY_READER, formIdleSegmentIdList());
cdbdisp_makeDispatchResults(ds, list_length(ds->allocatedGangs), cancelOnError);
cdbdisp_makeDispatchParams (ds, list_length(ds->allocatedGangs), queryText, queryTextLength);
/* Send the command to every gang allocated above. */
foreach(le, ds->allocatedGangs)
{
Gang *rg = lfirst(le);
cdbdisp_dispatchToGang(ds, rg, -1);
}
addToGxactTwophaseSegments(primaryGang);
cdbdisp_waitDispatchFinish(ds);
cdbdisp_checkDispatchResult(ds, DISPATCH_WAIT_NONE);
cdbdisp_getDispatchResults(ds, &qeError);
cdbdisp_destroyDispatcherState(ds);
cdbdisp_markNamedPortalGangsDestroyed();
/* Re-throw any error collected from the executors after cleanup. */
if (qeError)
{
ReThrowError(qeError);
}
}
/**
* Create a route entry for a given rt_path and
* insert it into the global RIB tree.
*/
/*
 * Create (or reuse) the route entry for the given rt_path's destination,
 * attach the path to that entry in the global RIB tree, and refresh it.
 * Paths with a broken cost or an over-long prefix are silently ignored.
 */
void
olsr_insert_rt_path(struct rt_path *rtp, struct tc_entry *tc, struct link_entry *link)
{
  struct rt_entry *rt;
  struct avl_node *node;

  /* Skip originators whose path cost is marked broken. */
  if (tc->path_cost == ROUTE_COST_BROKEN) {
    return;
  }

  /* Skip prefixes longer than the configured maximum. */
  if (rtp->rtp_dst.prefix_len > olsr_cnf->maxplen) {
    return;
  }

  /* Reuse an existing route entry for this destination, else allocate. */
  node = avl_find(&routingtree, &rtp->rtp_dst);
  if (node) {
    rt = rt_tree2rt(node);
  } else {
    rt = olsr_alloc_rt_entry(&rtp->rtp_dst);
    if (!rt) {
      return;
    }
  }

  /* Key the path by its originator and hook it into the route entry. */
  rtp->rtp_originator = tc->addr;
  rtp->rtp_tree_node.key = &rtp->rtp_originator;
  avl_insert(&rt->rt_path_tree, &rtp->rtp_tree_node, AVL_DUP_NO);
  rtp->rtp_rt = rt;
  olsr_update_rt_path(rtp, tc, link);
}
/**
 * Get config info from given filename in the backup config directory.
 * @param filename  base name of the backup config file (must not be NULL)
 * @return parsed config object on success (caller owns the reference),
 *         NULL on bad input or on failure to resolve/load the file.
 */
json_t* conf_get_ast_backup_config_info(const char* filename)
{
  char* tmp;
  char* full_filename;
  json_t* j_conf;
  int ret;

  if(filename == NULL) {
    slog(LOG_WARNING, "Wrong input parameter.");
    return NULL;
  }
  slog(LOG_DEBUG, "Fired get_ast_backup_config_info_json.");

  /* Resolve the backup directory and guard against a NULL result:
   * passing NULL to a "%s" conversion is undefined behavior. */
  tmp = get_ast_backup_conf_dir();
  if(tmp == NULL) {
    slog(LOG_ERR, "Could not get backup config directory.");
    return NULL;
  }

  /* asprintf leaves full_filename indeterminate on failure; the old
   * code used and freed it unconditionally. */
  ret = asprintf(&full_filename, "%s/%s", tmp, filename);
  sfree(tmp);
  if(ret < 0) {
    slog(LOG_ERR, "Could not create config filename.");
    return NULL;
  }

  j_conf = get_ast_config_info_object(full_filename);
  sfree(full_filename);
  if(j_conf == NULL) {
    slog(LOG_ERR, "Could not get config file info.");
    return NULL;
  }
  return j_conf;
}
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <poll.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <stdbool.h>
#include <pthread.h>
#include <logwrap/logwrap.h>
#include "private/android_filesystem_config.h"
#include "cutils/log.h"
#include <cutils/klog.h>
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
#define MIN(a,b) (((a)<(b))?(a):(b))
static pthread_mutex_t fd_mutex = PTHREAD_MUTEX_INITIALIZER;
#define ERROR(fmt, args...) \
do { \
fprintf(stderr, fmt, ## args); \
ALOG(LOG_ERROR, "logwrapper", fmt, ## args); \
} while(0)
#define FATAL_CHILD(fmt, args...) \
do { \
ERROR(fmt, ## args); \
_exit(-1); \
} while(0)
#define MAX_KLOG_TAG 16
/* This is a simple buffer that holds up to the first beginning_buf->buf_size
 * bytes of output from a command.
 */
#define BEGINNING_BUF_SIZE 0x1000
struct beginning_buf {
char *buf;
size_t alloc_len;
/* buf_size is the usable space, which is one less than the allocated size */
size_t buf_size;
/* number of bytes currently stored in buf */
size_t used_len;
};
/* This is a circular buf that holds up to the last ending_buf->buf_size bytes
 * of output from a command after the first beginning_buf->buf_size bytes
 * (which are held in beginning_buf above).
 */
#define ENDING_BUF_SIZE 0x1000
struct ending_buf {
char *buf;
ssize_t alloc_len;
/* buf_size is the usable space, which is one less than the allocated size */
ssize_t buf_size;
ssize_t used_len;
/* read and write offsets into the circular buffer */
int read;
int write;
};
/* A structure to hold all the abbreviated buf data */
struct abbr_buf {
struct beginning_buf b_buf;
struct ending_buf e_buf;
/* set once b_buf can take no more lines; output then spills into e_buf */
int beginning_buf_full;
};
/* Collect all the various bits of info needed for logging in one place. */
struct log_info {
/* bitmask of LOG_KLOG / LOG_ALOG / LOG_FILE destinations */
int log_target;
char klog_fmt[MAX_KLOG_TAG * 2];
char *btag;
bool abbreviated;
FILE *fp;
struct abbr_buf a_buf;
};
/* Forward declaration */
static void add_line_to_abbr_buf(struct abbr_buf *a_buf, char *linebuf, int linelen);
/* Append a line to the linear beginning buffer.
 * Return 0 on success, and 1 when the line no longer fits. */
static int add_line_to_linear_buf(struct beginning_buf *b_buf,
                                  char *line, ssize_t line_len)
{
    if ((b_buf->used_len + line_len) > b_buf->buf_size)
        return 1;

    /* Add to the end of the buf */
    memcpy(b_buf->buf + b_buf->used_len, line, line_len);
    b_buf->used_len += line_len;
    return 0;
}
/* Append a line to the circular ending buffer, evicting the oldest
 * bytes (advancing read) when there is not enough free space.  Lines
 * longer than the whole buffer, or calls before the buffer was
 * allocated, are dropped. */
static void add_line_to_circular_buf(struct ending_buf *e_buf,
char *line, ssize_t line_len)
{
ssize_t free_len;
ssize_t needed_space;
int cnt;
if (e_buf->buf == NULL) {
return;
}
if (line_len > e_buf->buf_size) {
return;
}
free_len = e_buf->buf_size - e_buf->used_len;
if (line_len > free_len) {
/* remove oldest entries at read, and move read to make
* room for the new string */
needed_space = line_len - free_len;
e_buf->read = (e_buf->read + needed_space) % e_buf->buf_size;
e_buf->used_len -= needed_space;
}
/* Copy the line into the circular buffer, dealing with possible
* wraparound.
*/
cnt = MIN(line_len, e_buf->buf_size - e_buf->write);
memcpy(e_buf->buf + e_buf->write, line, cnt);
if (cnt < line_len) {
memcpy(e_buf->buf, line + cnt, line_len - cnt);
}
e_buf->used_len += line_len;
e_buf->write = (e_buf->write + line_len) % e_buf->buf_size;
}
/* Emit one line directly to every log target enabled in log_info. */
static void do_log_line(struct log_info *log_info, char *line) {
    int target = log_info->log_target;

    if (target & LOG_KLOG)
        klog_write(6, log_info->klog_fmt, line);
    if (target & LOG_ALOG)
        ALOG(LOG_INFO, log_info->btag, "%s", line);
    if (target & LOG_FILE)
        fprintf(log_info->fp, "%s\n", line);
}
/* Route one line either into the abbreviated capture buffers or
 * straight to the configured targets via do_log_line(). */
static void log_line(struct log_info *log_info, char *line, int len) {
    if (!log_info->abbreviated)
        do_log_line(log_info, line);
    else
        add_line_to_abbr_buf(&log_info->a_buf, line, len);
}
/*
* The kernel will take a maximum of 1024 bytes in any single write to
* the kernel logging device file, so find and print each line one at
* a time. The allocated size for buf should be at least 1 byte larger
* than buf_size (the usable size of the buffer) to make sure there is
* room to temporarily stuff a null byte to terminate a line for logging.
*/
/* Split buf into newline-terminated lines and log each one.
 * NOTE: after a newline this reads and temporarily overwrites
 * buf[i + 1], so the allocation backing buf must be at least
 * buf_size + 1 bytes (see the comment above this function). */
static void print_buf_lines(struct log_info *log_info, char *buf, int buf_size)
{
char *line_start;
char c;
int i;
line_start = buf;
for (i = 0; i < buf_size; i++) {
if (*(buf + i) == '\n') {
/* Found a line ending, print the line and compute new line_start */
/* Save the next char and replace with \0 */
c = *(buf + i + 1);
*(buf + i + 1) = '\0';
do_log_line(log_info, line_start);
/* Restore the saved char */
*(buf + i + 1) = c;
line_start = buf + i + 1;
} else if (*(buf + i) == '\0') {
/* The end of the buffer, print the last bit */
do_log_line(log_info, line_start);
break;
}
}
/* If the buffer was completely full, and didn't end with a newline, just
* ignore the partial last line.
*/
}
/* Prepare an abbr_buf: zero the bookkeeping and allocate both capture
 * buffers.  A failed allocation leaves the corresponding buf NULL,
 * which the add/print paths tolerate. */
static void init_abbr_buf(struct abbr_buf *a_buf) {
    char *p;

    memset(a_buf, 0, sizeof(struct abbr_buf));

    p = malloc(BEGINNING_BUF_SIZE);
    if (p != NULL) {
        a_buf->b_buf.buf = p;
        a_buf->b_buf.alloc_len = BEGINNING_BUF_SIZE;
        a_buf->b_buf.buf_size = BEGINNING_BUF_SIZE - 1;
    }

    p = malloc(ENDING_BUF_SIZE);
    if (p != NULL) {
        a_buf->e_buf.buf = p;
        a_buf->e_buf.alloc_len = ENDING_BUF_SIZE;
        a_buf->e_buf.buf_size = ENDING_BUF_SIZE - 1;
    }
}
/* Release both capture buffers; free(NULL) is a harmless no-op. */
static void free_abbr_buf(struct abbr_buf *a_buf) {
    free(a_buf->e_buf.buf);
    free(a_buf->b_buf.buf);
}
/* Feed a line into the abbreviated capture: fill the linear beginning
 * buffer until it reports full, then spill every further line (including
 * the one that did not fit) into the circular ending buffer. */
static void add_line_to_abbr_buf(struct abbr_buf *a_buf, char *linebuf, int linelen) {
    if (a_buf->beginning_buf_full == 0) {
        a_buf->beginning_buf_full =
            add_line_to_linear_buf(&a_buf->b_buf, linebuf, linelen);
    }
    if (a_buf->beginning_buf_full != 0) {
        add_line_to_circular_buf(&a_buf->e_buf, linebuf, linelen);
    }
}
/* Dump the abbreviated capture: first the linear beginning buffer, then
 * an ellipsis if data was dropped, then the circular ending buffer
 * (linearized into a temporary copy when it has wrapped). */
static void print_abbr_buf(struct log_info *log_info) {
struct abbr_buf *a_buf = &log_info->a_buf;
/* Add the abbreviated output to the kernel log */
if (a_buf->b_buf.alloc_len) {
print_buf_lines(log_info, a_buf->b_buf.buf, a_buf->b_buf.used_len);
}
/* Print an ellipsis to indicate that the buffer has wrapped or
* is full, and some data was not logged.
*/
if (a_buf->e_buf.used_len == a_buf->e_buf.buf_size) {
do_log_line(log_info, "...\n");
}
if (a_buf->e_buf.used_len == 0) {
return;
}
/* Simplest way to print the circular buffer is allocate a second buf
* of the same size, and memcpy it so it's a simple linear buffer,
* and then call print_buf_lines on it */
if (a_buf->e_buf.read < a_buf->e_buf.write) {
/* no wrap around, just print it */
print_buf_lines(log_info, a_buf->e_buf.buf + a_buf->e_buf.read,
a_buf->e_buf.used_len);
} else {
/* The circular buffer will always have at least 1 byte unused,
* so by allocating alloc_len here we will have at least
* 1 byte of space available as required by print_buf_lines().
*/
char * nbuf = malloc(a_buf->e_buf.alloc_len);
if (!nbuf) {
return;
}
int first_chunk_len = a_buf->e_buf.buf_size - a_buf->e_buf.read;
memcpy(nbuf, a_buf->e_buf.buf + a_buf->e_buf.read, first_chunk_len);
/* copy second chunk */
memcpy(nbuf + first_chunk_len, a_buf->e_buf.buf, a_buf->e_buf.write);
print_buf_lines(log_info, nbuf, first_chunk_len + a_buf->e_buf.write);
free(nbuf);
}
}
/* Parent side of the logwrapper: pump the child's combined stdout/stderr
 * from the pty, split it into lines, and forward each line to the
 * configured log targets (or into the abbreviated capture buffers).
 * Returns the child's exit code (or stores the raw status via chld_sts),
 * or a negative/errno value on internal failure. */
static int parent(const char *tag, int parent_read, pid_t pid,
int *chld_sts, int log_target, bool abbreviated, char *file_path) {
int status = 0;
char buffer[4096];
struct pollfd poll_fds[] = {
[0] = {
.fd = parent_read,
.events = POLLIN,
},
};
int rc = 0;
int fd;
struct log_info log_info;
int a = 0; // start index of unprocessed data
int b = 0; // end index of unprocessed data
int sz;
bool found_child = false;
char tmpbuf[256];
/* NOTE(review): some basename() implementations modify their argument;
 * tag is passed as const here -- confirm the libc variant in use. */
log_info.btag = basename(tag);
if (!log_info.btag) {
log_info.btag = (char*) tag;
}
/* Abbreviated mode is pointless with no target at all. */
if (abbreviated && (log_target == LOG_NONE)) {
abbreviated = 0;
}
if (abbreviated) {
init_abbr_buf(&log_info.a_buf);
}
if (log_target & LOG_KLOG) {
snprintf(log_info.klog_fmt, sizeof(log_info.klog_fmt),
"<6>%.*s: %%s\n", MAX_KLOG_TAG, log_info.btag);
}
if ((log_target & LOG_FILE) && !file_path) {
/* No file_path specified, clear the LOG_FILE bit */
log_target &= ~LOG_FILE;
}
if (log_target & LOG_FILE) {
fd = open(file_path, O_WRONLY | O_CREAT, 0664);
if (fd < 0) {
ERROR("Cannot log to file %s\n", file_path);
log_target &= ~LOG_FILE;
} else {
/* Append to an existing log file. */
lseek(fd, 0, SEEK_END);
log_info.fp = fdopen(fd, "a");
}
}
log_info.log_target = log_target;
log_info.abbreviated = abbreviated;
/* Main pump loop: read pty output until the child has been reaped. */
while (!found_child) {
if (TEMP_FAILURE_RETRY(poll(poll_fds, ARRAY_SIZE(poll_fds), -1)) < 0) {
ERROR("poll failed\n");
rc = -1;
goto err_poll;
}
if (poll_fds[0].revents & POLLIN) {
/* NOTE(review): a read() error (sz < 0) is not checked here and
 * would corrupt the buffer indices -- confirm intended behavior. */
sz = read(parent_read, &buffer[b], sizeof(buffer) - 1 - b);
sz += b;
// Log one line at a time
for (b = 0; b < sz; b++) {
if (buffer[b] == '\r') {
if (abbreviated) {
/* The abbreviated logging code uses newline as
* the line separator. Luckily, the pty layer
* helpfully cooks the output of the command
* being run and inserts a CR before NL. So
* I just change it to NL here when doing
* abbreviated logging.
*/
buffer[b] = '\n';
} else {
buffer[b] = '\0';
}
} else if (buffer[b] == '\n') {
buffer[b] = '\0';
log_line(&log_info, &buffer[a], b - a);
a = b + 1;
}
}
if (a == 0 && b == sizeof(buffer) - 1) {
// buffer is full, flush
buffer[b] = '\0';
log_line(&log_info, &buffer[a], b - a);
b = 0;
} else if (a != b) {
// Keep left-overs
b -= a;
memmove(buffer, &buffer[a], b);
a = 0;
} else {
a = 0;
b = 0;
}
}
/* Pty hangup: the child closed its end; reap it without blocking. */
if (poll_fds[0].revents & POLLHUP) {
int ret;
ret = waitpid(pid, &status, WNOHANG);
if (ret < 0) {
rc = errno;
ALOG(LOG_ERROR, "logwrap", "waitpid failed with %s\n", strerror(errno));
goto err_waitpid;
}
if (ret > 0) {
found_child = true;
}
}
}
if (chld_sts != NULL) {
*chld_sts = status;
} else {
if (WIFEXITED(status))
rc = WEXITSTATUS(status);
else
rc = -ECHILD;
}
// Flush remaining data
if (a != b) {
buffer[b] = '\0';
log_line(&log_info, &buffer[a], b - a);
}
/* All the output has been processed, time to dump the abbreviated output */
if (abbreviated) {
print_abbr_buf(&log_info);
}
/* Report abnormal termination of the child to the log targets. */
if (WIFEXITED(status)) {
if (WEXITSTATUS(status)) {
snprintf(tmpbuf, sizeof(tmpbuf),
"%s terminated by exit(%d)\n", log_info.btag, WEXITSTATUS(status));
do_log_line(&log_info, tmpbuf);
}
} else {
if (WIFSIGNALED(status)) {
snprintf(tmpbuf, sizeof(tmpbuf),
"%s terminated by signal %d\n", log_info.btag, WTERMSIG(status));
do_log_line(&log_info, tmpbuf);
} else if (WIFSTOPPED(status)) {
snprintf(tmpbuf, sizeof(tmpbuf),
"%s stopped by signal %d\n", log_info.btag, WSTOPSIG(status));
do_log_line(&log_info, tmpbuf);
}
}
err_waitpid:
err_poll:
if (log_target & LOG_FILE) {
fclose(log_info.fp); /* Also closes underlying fd */
}
if (abbreviated) {
free_abbr_buf(&log_info.a_buf);
}
return rc;
}
/* Exec the requested command in the child process.  Does not return on
 * success; argv is copied so a NULL terminator can be appended. */
static void child(int argc, char* argv[]) {
    char* exec_args[argc + 1];
    int i;

    for (i = 0; i < argc; i++)
        exec_args[i] = argv[i];
    exec_args[argc] = NULL;

    if (execvp(exec_args[0], exec_args)) {
        FATAL_CHILD("executing %s failed: %s\n", exec_args[0],
                strerror(errno));
    }
}
/* Fork and exec argv with the child's stdout/stderr attached to a pty
 * (so output is unbuffered), then stream that output to the requested
 * log targets via parent().  Returns the child's exit code (or stores
 * the raw status via the status out-parameter) or a negative value on
 * setup failure.  A process-wide mutex serializes concurrent callers. */
int android_fork_execvp_ext(int argc, char* argv[], int *status, bool ignore_int_quit,
int log_target, bool abbreviated, char *file_path) {
pid_t pid;
int parent_ptty;
int child_ptty;
struct sigaction intact;
struct sigaction quitact;
sigset_t blockset;
sigset_t oldset;
int rc = 0;
rc = pthread_mutex_lock(&fd_mutex);
if (rc) {
ERROR("failed to lock signal_fd mutex\n");
goto err_lock;
}
/* Use ptty instead of socketpair so that STDOUT is not buffered */
parent_ptty = open("/dev/ptmx", O_RDWR);
if (parent_ptty < 0) {
ERROR("Cannot create parent ptty\n");
rc = -1;
goto err_open;
}
char child_devname[64];
if (grantpt(parent_ptty) || unlockpt(parent_ptty) ||
ptsname_r(parent_ptty, child_devname, sizeof(child_devname)) != 0) {
ERROR("Problem with /dev/ptmx\n");
rc = -1;
goto err_ptty;
}
child_ptty = open(child_devname, O_RDWR);
if (child_ptty < 0) {
ERROR("Cannot open child_ptty\n");
rc = -1;
goto err_child_ptty;
}
/* Block INT/QUIT across fork so the child can reset its mask cleanly. */
sigemptyset(&blockset);
sigaddset(&blockset, SIGINT);
sigaddset(&blockset, SIGQUIT);
pthread_sigmask(SIG_BLOCK, &blockset, &oldset);
pid = fork();
if (pid < 0) {
close(child_ptty);
ERROR("Failed to fork\n");
rc = -1;
goto err_fork;
} else if (pid == 0) {
/* Child: release the inherited mutex, restore the signal mask,
 * wire the pty to stdout/stderr and exec (no return on success). */
pthread_mutex_unlock(&fd_mutex);
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
close(parent_ptty);
// redirect stdout and stderr
dup2(child_ptty, 1);
dup2(child_ptty, 2);
close(child_ptty);
child(argc, argv);
} else {
/* Parent: optionally ignore INT/QUIT while the child runs. */
close(child_ptty);
if (ignore_int_quit) {
struct sigaction ignact;
memset(&ignact, 0, sizeof(ignact));
ignact.sa_handler = SIG_IGN;
sigaction(SIGINT, &ignact, &intact);
sigaction(SIGQUIT, &ignact, &quitact);
}
rc = parent(argv[0], parent_ptty, pid, status, log_target,
abbreviated, file_path);
}
if (ignore_int_quit) {
sigaction(SIGINT, &intact, NULL);
sigaction(SIGQUIT, &quitact, NULL);
}
err_fork:
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
err_child_ptty:
err_ptty:
close(parent_ptty);
err_open:
pthread_mutex_unlock(&fd_mutex);
err_lock:
return rc;
}
|
#ifndef SHIFT_CURRENT_SOLVER_H
#define SHIFT_CURRENT_SOLVER_H
#include "base_data.h"
#include "band_structure_solver.h"
#include "berry_connection_solver.h"
#include "velocity_solver.h"
#include "linear_response.h"
#include <set>
#include <map>
/// Computes the shift-current conductivity of a tight-binding model
/// (held in base_data) over a photon-energy grid of omega_num points
/// starting at start_omega with spacing domega.
class shift_current_solver
{
public:
/// Store the spin factor, frequency grid and smearing configuration
/// used by subsequent conductivity evaluations.
void set_parameters(
const int &nspin,
const int &omega_num,
const double &domega,
const double &start_omega,
const int &smearing_method,
const double &eta
);
/// Accumulate the shift-current conductivity over the given k-points;
/// `method` selects between the two Inm evaluation routines below.
MatrixXd get_shift_current_conductivity(
base_data &Base_Data,
const MatrixXd &k_direct_coor,
const int &total_kpoint_num,
const int &occupied_num,
const int &method
);
private:
/// Single-k-point contribution to the conductivity.
MatrixXd get_shift_current_conductivity_ik(
base_data &Base_Data,
const VectorXcd &exp_ikR,
const int &occupied_num,
const int &method
);
// m is the band indicator of the occupied state,
// n is the band indicator of the unoccupied state,
// and n, m is a pair of indicators, so the dimensions of m and n are the same.
MatrixXd get_Inm_ik_direct(
base_data &Base_Data,
const VectorXcd &exp_ikR,
const VectorXd &eigenvalues,
const MatrixXcd &eigenvectors,
const std::vector<int> m,
const std::vector<int> n
);
// m is the band indicator of the occupied state,
// n is the band indicator of the unoccupied state,
// and n, m is a pair of indicators, so the dimensions of m and n are the same.
MatrixXd get_Inm_ik_sumOver(
base_data &Base_Data,
const VectorXcd &exp_ikR,
const VectorXd &eigenvalues,
const MatrixXcd &eigenvectors,
const std::vector<int> m,
const std::vector<int> n
);
// Spin degeneracy factor (1 or 2).
int nspin = 1;
// Number of photon-energy grid points.
int omega_num;
// Grid spacing of the photon-energy axis.
double domega;
// First photon energy on the grid.
double start_omega;
// Smearing scheme selector used when broadening delta functions.
int smearing_method = 1;
// Smearing width.
double eta;
};
#endif |
// WARNING: Could not reconcile some variable overlaps
// DWARF DIE: 2e31
/* Ghidra-decompiled AT command handler for entering deep sleep.
 * Fix: the first descriptor assignment contained the mojibake "¶cnt"
 * — a mis-encoded "&paracnt" (the "&para" sequence was collapsed into
 * the pilcrow character) — which did not compile.
 * NOTE(review): the FUN_* callees and the value returned through
 * extraout_a0 are unresolved decompiler artifacts; verify against the
 * firmware sources before relying on this routine. */
AT_ERROR_CODE deep_sleep_handler(at_para_t *at_para)
{
    AT_ERROR_CODE extraout_a0;
    int iVar1;
    int iStack52;
    s32 paracnt;
    at_gpiowakeup_para_t wakeupParam;
    at_para_descriptor_t cmd_para_list [2];
    cmd_para_list[0]._0_4_ = &paracnt;   /* was mojibake "¶cnt" */
    cmd_para_list[0].pvar = (void *)0xc04;
    cmd_para_list[1].pvar = (void *)0xc04;
    wakeupParam.gpioId._0_1_ = 3;
    cmd_para_list[0].option._0_1_ = 3;
    cmd_para_list[1]._0_4_ = &wakeupParam;
    paracnt = 0;
    wakeupParam.sleep_time = 0;
    if (*at_para->ptr == '=') {
        at_para->ptr = at_para->ptr + 1;
        iVar1 = FUN_00010442();
        if ((iVar1 == 0) && (0 < iStack52)) {
            FUN_0001045e();
            FUN_0001046a();
            return extraout_a0;
        }
    }
    return AEC_PARA_ERROR;
}
/*===================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#ifndef QMITKDATAMANAGERVIEW_H_
#define QMITKDATAMANAGERVIEW_H_
// BlueBerry includes
#include <berryIBerryPreferences.h>
/// Qmitk
#include <QmitkAbstractView.h>
#include <QmitkNodeDescriptorManager.h>
/// Qt
#include <QItemSelection>
#include <org_mitk_gui_qt_datamanager_Export.h>
// Forward declarations
class QMenu;
class QAction;
class QComboBox;
class QWidgetAction;
class QSlider;
class QModelIndex;
class QTreeView;
class QPushButton;
class QToolBar;
class QMenu;
class QSignalMapper;
class QmitkDnDFrameWidget;
class QmitkDataStorageTreeModel;
class QmitkDataManagerItemDelegate;
class QmitkNumberPropertySlider;
class QmitkDataStorageFilterProxyModel;
///
/// \ingroup org_mitk_gui_qt_datamanager_internal
///
/// \brief A View class that can show all data tree nodes of a certain DataStorage
///
/// \TODO: complete PACS support, in save dialog show regular filename
///
class MITK_QT_DATAMANAGER QmitkDataManagerView : public QmitkAbstractView
{
Q_OBJECT
public:
static const QString VIEW_ID; // = "org.mitk.extapp.defaultperspective"
///
/// \brief Standard ctor.
///
QmitkDataManagerView();
///
/// \brief Standard dtor.
///
virtual ~QmitkDataManagerView();
public slots:
///
/// Invoked when the opacity slider changed
///
void OpacityChanged(int value);
///
/// Invoked when the opacity action changed
/// In this function the opacity slider is set to the selected node's opacity value
///
void OpacityActionChanged();
/// Invoked when the component action changed
/// In this function the component slider is updated for the selected node
///
void ComponentActionChanged();
///
/// Invoked when the color button is pressed
///
void ColorChanged();
///
/// Invoked when the color action changed
///
void ColorActionChanged();
///
/// Invoked when the texture interpolation button is pressed
///
void TextureInterpolationChanged();
///
/// Invoked when the texture interpolation action changed
///
void TextureInterpolationToggled ( bool checked );
///
/// \brief Aggregates available colormaps
///
void ColormapMenuAboutToShow ();
///
/// \brief changes the active colormap
///
void ColormapActionToggled (bool);
///
/// Invoked before the surface representation menu is shown
///
void SurfaceRepresentationMenuAboutToShow ();
///
/// SurfaceRepresentationActionToggled
///
void SurfaceRepresentationActionToggled ( bool checked );
///
/// \brief Shows a node context menu.
///
void NodeTableViewContextMenuRequested( const QPoint & index );
///
/// \brief Invoked when an element should be removed.
///
void RemoveSelectedNodes( bool checked = false );
///
/// \brief Invoked when an element should be reinitialized.
///
void ReinitSelectedNodes( bool checked = false );
///
/// \brief Invoked when the visibility of all nodes should be turned off.
///
void MakeAllNodesInvisible ( bool checked = false );
///
/// \brief Makes all selected nodes visible, all other nodes invisible.
///
void ShowOnlySelectedNodes ( bool checked = false );
///
/// \brief Invoked when the visibility of the selected nodes should be toggled.
///
void ToggleVisibilityOfSelectedNodes ( bool checked = false );
///
/// \brief Invoked when infos of the selected nodes should be shown in a dialog.
///
void ShowInfoDialogForSelectedNodes ( bool checked = false );
///
/// \brief Reinits everything.
///
void GlobalReinit ( bool checked = false );
///
/// Invoked when the preferences were changed
///
void OnPreferencesChanged(const berry::IBerryPreferences*) override;
///
/// \brief will be toggled when an extension point context menu action is toggled
/// this is a proxy method which will load the corresponding extension class
/// and run IContextMenuAction
///
void ContextMenuActionTriggered( bool );
/// When rows are inserted auto expand them
void NodeTreeViewRowsInserted ( const QModelIndex & parent, int start, int end );
/// will setup m_CurrentRowCount
void NodeTreeViewRowsRemoved ( const QModelIndex & parent, int start, int end );
/// Whenever the selection changes set the "selected" property respectively
void NodeSelectionChanged( const QItemSelection & selected, const QItemSelection & deselected );
/// Opens the editor with the given id using the current data storage
void ShowIn(const QString& editorId);
protected:
///
/// \brief Create the view here.
///
virtual void CreateQtPartControl(QWidget* parent) override;
void SetFocus() override;
///
/// \brief Shows a file open dialog.
///
void FileOpen( const char * fileName, mitk::DataNode* parentNode );
///
/// React to node changes. Overridden from QmitkAbstractView.
///
virtual void NodeChanged(const mitk::DataNode* /*node*/) override;
protected:
QWidget* m_Parent;
QmitkDnDFrameWidget* m_DndFrameWidget;
///
/// \brief A plain widget as the base pane.
///
QmitkDataStorageTreeModel* m_NodeTreeModel;
QmitkDataStorageFilterProxyModel* m_FilterModel;
mitk::NodePredicateBase::Pointer m_HelperObjectFilterPredicate;
mitk::NodePredicateBase::Pointer m_NodeWithNoDataFilterPredicate;
///
/// Holds the preferences for the datamanager.
///
berry::IBerryPreferences::Pointer m_DataManagerPreferencesNode;
///
/// saves the configuration elements for the context menu actions from extension points
///
std::map<QAction*, berry::IConfigurationElement::Pointer> m_ConfElements;
///
/// \brief The Table view to show the selected nodes.
///
QTreeView* m_NodeTreeView;
///
/// \brief The context menu that shows up when right clicking on a node.
///
QMenu* m_NodeMenu;
///
/// \brief flag indicating whether a surface created from a selected decimation is decimated with vtkQuadricDecimation or not
///
bool m_SurfaceDecimation;
///# A list of ALL actions for the Context Menu
std::vector< std::pair< QmitkNodeDescriptor*, QAction* > > m_DescriptorActionList;
/// A Slider widget to change the opacity of a node
QSlider* m_OpacitySlider;
/// A Slider widget to change the rendered vector component of an image
QmitkNumberPropertySlider* m_ComponentSlider;
/// button to change the color of a node
QPushButton* m_ColorButton;
/// TextureInterpolation action
QAction* m_TextureInterpolation;
/// SurfaceRepresentation action
QAction* m_SurfaceRepresentation;
/// Lookuptable selection action
QAction* m_ColormapAction;
/// Maps "Show in" actions to editor ids
QSignalMapper* m_ShowInMapper;
/// A list of "Show in" actions
QList<QAction*> m_ShowInActions;
/// saves the current amount of rows shown in the datamanager
size_t m_CurrentRowCount;
/// if true, GlobalReinit() is called if a node is deleted
bool m_GlobalReinitOnNodeDelete;
QmitkDataManagerItemDelegate* m_ItemDelegate;
private:
QItemSelectionModel* GetDataNodeSelectionModel() const override;
/// Reopen multi widget editor if it has been closed
mitk::IRenderWindowPart *OpenRenderWindowPart(bool activatedEditor = true);
};
#endif /*QMITKDATAMANAGERVIEW_H_*/
|
//+doc locate an executable in PATH
//+depends access getenv
//+def
/*
 * Search each directory listed in $PATH (or a built-in default list when
 * PATH is unset) for an executable named `file`.  On success the full path
 * is left in `buf` and 1 is returned; otherwise 0 is returned.  The caller
 * must supply a buffer large enough for the longest PATH entry plus '/'
 * plus the file name.
 *
 * Fixes over the original: empty PATH components (leading/trailing/double
 * ':') no longer leak a ':' into the candidate path, and an empty PATH
 * string no longer causes a read past its terminator.
 */
int where(const char *file,char *buf){
	const char *path = getenv("PATH");
	if ( !path ){
		path = "/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/local/bin";
	}
	while ( *path ){
		char *p = buf;
		/* copy one directory component (up to ':' or end of string) */
		while ( *path && *path != ':' ){
			*p++ = *path++;
		}
		if ( p != buf ){	/* skip empty components */
			*p++ = '/';
			for ( const char *c = file; (*p++ = *c++); );
			if ( access( buf, R_OK | X_OK ) == 0 ){
				return( 1 );
			}
		}
		if ( *path == ':' )
			path++;	/* step over the separator */
	}
	return( 0 );
}
#include<stdio.h>
/*
 * Reads a sequence of n integers followed by a sequence of m integers and
 * prints the maximum of each sequence, separated by a space.
 *
 * The original selection-sorted both sequences into fixed 100-element
 * buffers just to read off the last element: that overflows the buffers
 * for inputs longer than 100, uses a fragile XOR-swap macro, and costs
 * O(n^2).  A single pass while reading needs no storage at all and
 * produces the same output for all valid inputs.
 */
int main(){
    int n,m,i,v,a_top=0,b_top=0;
    if(scanf("%d",&n)!=1) return 0;
    for(i=0;i<n;i++){
        if(scanf("%d",&v)!=1) return 0;
        if(i==0||v>a_top) a_top=v;   /* running maximum of sequence A */
    }
    if(scanf("%d",&m)!=1) return 0;
    for(i=0;i<m;i++){
        if(scanf("%d",&v)!=1) return 0;
        if(i==0||v>b_top) b_top=v;   /* running maximum of sequence B */
    }
    printf("%d %d",a_top,b_top);
    return 0;
}
/**
 * Returns the number of occurrences of the element within the specified CC_Array.
 * Elements are compared by pointer identity (the stored `void *` values).
 *
 * @param[in] ar array that is being searched
 * @param[in] element the element that is being searched for
 *
 * @return the number of occurrences of the element.
 */
size_t cc_array_contains(CC_Array *ar, void *element)
{
    size_t matches = 0;
    size_t index = 0;
    while (index < ar->size) {
        if (ar->buffer[index] == element)
            matches++;
        index++;
    }
    return matches;
}
/* Try to handle request; return != 0 if handled.  Called by the standard
 * handler for requests it does not recognize. */
static int rcm_handle_control(struct usb_ctrl_req_t *req, void *data)
{
	static unsigned char buf[UPS_REQUESTSIZE];
	int is_ups_message;

	/* Only a class-specific interface request carrying the expected UPS
	 * message (matching request, value, index and length) is ours. */
	is_ups_message = req->recp == USB_CTRL_REQ_IFACE &&
	    req->type == USB_CTRL_REQ_CLASS &&
	    req->bRequest == UPS_REQUESTVALUE &&
	    req->wValue == UPS_MESSAGEVALUE &&
	    req->wIndex == UPS_INDEXVALUE &&
	    req->wLength == UPS_REQUESTSIZE;
	if (!is_ups_message)
		return 0;

	/* Receive the payload on EP0; rcm_handle_data gets it when done. */
	usb_ep0_rx(buf, req->wLength, rcm_handle_data, NULL);
	return 1;
}
#include<stdio.h>
/*
 * Computes how many a-by-a tiles are needed to cover an m-by-n rectangle:
 * ceil(m/a) * ceil(n/a), using the (x + a - 1) / a integer-ceiling idiom.
 *
 * Fixes over the original: scanf used "%d" with long long destinations
 * (undefined behavior; "%lld" is required) and printf used the MSVC-only
 * "%I64d" instead of the standard "%lld".
 */
int main()
{
    long long m = 0, n = 0;
    long long a = 0;
    if (scanf("%lld%lld%lld", &m, &n, &a) != 3)
        return 0;
    long long r1 = (m + a - 1) / a;   /* tiles across */
    long long r2 = (n + a - 1) / a;   /* tiles down */
    long long r3 = r1 * r2;
    printf("%lld", r3);
    return 0;
}
/* Go back to the original screen: stop the blink timer, restore cooked
 * console modes on input and output, and reactivate the original screen
 * buffer.  Any Win32 console failure is fatal. */
static void
endwin(void)
{
	/* Stop the cursor-blink timer; blink_wasticking records that it
	 * should be restarted when the alternate screen comes back. */
	if (blink_ticking) {
		RemoveTimeOut(blink_id);
		blink_id = 0;
		blink_ticking = False;
		blink_on = True;
		blink_wasticking = True;
	}
	/* Restore cooked input: echo, line editing, ^C processing, mouse. */
	if (SetConsoleMode(chandle, ENABLE_ECHO_INPUT |
		    ENABLE_LINE_INPUT |
		    ENABLE_PROCESSED_INPUT |
		    ENABLE_MOUSE_INPUT) == 0) {
		fprintf(stderr, "\nSetConsoleMode(CONIN$) failed: %s\n",
		    win32_strerror(GetLastError()));
		x3270_exit(1);
	}
	/* Restore processed output and wrap-at-end-of-line on the console. */
	if (SetConsoleMode(cohandle, ENABLE_PROCESSED_OUTPUT |
		    ENABLE_WRAP_AT_EOL_OUTPUT) == 0) {
		fprintf(stderr, "\nSetConsoleMode(CONOUT$) failed: %s\n",
		    win32_strerror(GetLastError()));
		x3270_exit(1);
	}
	/* Make the original screen buffer current again. */
	if (SetConsoleActiveScreenBuffer(cohandle) == 0) {
		fprintf(stderr, "\nSetConsoleActiveScreenBuffer failed: %s\n",
		    win32_strerror(GetLastError()));
		x3270_exit(1);
	}
	screen_swapped = FALSE;
}
/**
* Parse a string for numbers.
*
* Syntax can be something like " *[+-] *[0-9]*\.[0-9]* *".
*
* The function ignore all spaces. It strips leading zeroes which could
* possibly lead to overflow.
* The function returns a pointer to the integer part followed by *p_digits
* digits followed by a dot followed by *p_decimals digits (dot and
* fractional digits are optional, in this case *p_decimals is 0).
*
* @param buf start of string
* @param pend pointer to string end
* @param p_negative store if number is negative
* @param p_digits store number of integer digits
* @param p_decimals store number of fractional digits
* @return pointer to first not zero digit. If NULL this indicate a syntax
* error.
*/
static const char *
parse_numeric(const char *buf, const char *pend, bool *p_negative, size_t *p_digits, size_t *p_decimals)
{
	enum { blank = ' ' };
	/* advance p while cond holds, never moving past pend */
#define SKIP_IF(cond) while (p != pend && (cond)) ++p;
	const char *p, *start;
	bool negative = false;
	/* no fractional digits unless a dot is actually seen below */
	*p_decimals = 0;
	p = buf;
	/* skip leading spaces */
	SKIP_IF(*p == blank);
	/* all-blank input parses as non-negative with zero digits */
	if (p == pend) {
		*p_negative = false;
		*p_digits = 0;
		return p;
	}
	switch (*p) {
	case '-':
		negative = true;
		/* fall through -- consume the sign character like '+' */
	case '+':
		++p;
		SKIP_IF(*p == blank);
		break;
	}
	*p_negative = negative;
	/* a bare sign with nothing after it is a syntax error */
	if (p == pend)
		return NULL;
	/* strip leading zeroes (documented above: avoids later overflow) */
	SKIP_IF(*p == '0');
	start = p;
	SKIP_IF(TDS_ISDIGIT(*p));
	*p_digits = p - start;
	/* optional fractional part: '.' followed by digits */
	if (p != pend && *p == '.') {
		const char *decimals_start = ++p;
		SKIP_IF(TDS_ISDIGIT(*p));
		*p_decimals = p - decimals_start;
	}
	/* only trailing blanks may follow; anything else is a syntax error */
	SKIP_IF(*p == blank);
	if (p != pend)
		return NULL;
	return start;
}
/* ---------------------------------------------- *
* Software routine for performing sort. Function *
* simply performs quick-sort. *
* Author: Abazar *
* ---------------------------------------------- */
Hint poly_quickSort(Huint * startPtr, Huint * endPtr) {
  Huint pivot;
  Huint * leftPtr, * rightPtr;
  Huint temp, * tempPtr;
  /* single-element range: nothing to sort */
  if ( startPtr == endPtr ) { return SUCCESS; }
  leftPtr = startPtr;
  rightPtr = endPtr;
  /* pivot is the mean of the two end values -- it need not be an element
     of the range */
  pivot = (*leftPtr + *rightPtr)/2;
  /* partition: values <= pivot migrate left, values > pivot migrate right */
  while (leftPtr < rightPtr) {
    while ((leftPtr < rightPtr) && (*leftPtr <= pivot) ) {
      leftPtr++;
    }
    while((leftPtr <= rightPtr) && (*rightPtr > pivot) ) {
      rightPtr--;
    }
    if ( leftPtr < rightPtr ) {
      temp = *leftPtr;
      *leftPtr = *rightPtr;
      *rightPtr = temp;
    }
  }
  /* Fix up the split point so the recursive sub-ranges make progress.
     NOTE(review): in the crossed-pointers case the two pointers are
     swapped rather than nudged apart -- presumably correct for this
     partition scheme, but verify against the crossing cases before
     modifying any of these branches. */
  if ( leftPtr == rightPtr ) {
    if ( *rightPtr >= pivot ) {
      leftPtr = rightPtr - 1;
    } else {
      rightPtr++;
    }
  } else {
    if ( *rightPtr > pivot ) {
      leftPtr = rightPtr - 1;
    } else {
      tempPtr = leftPtr;
      leftPtr = rightPtr;
      rightPtr = tempPtr;
    }
  }
  /* recurse on the upper range [rightPtr, endPtr] and the lower range
     [startPtr, leftPtr] */
  poly_quickSort( rightPtr, endPtr );
  poly_quickSort( startPtr, leftPtr );
  return SUCCESS;
}
/**
* Parse a string containing the index partition criteria, populating the internal
* members of struct ltfs_volume accordingly.
* @param filterrules input string containing the desired index partition criteria
* @param vol LTFS volume
* @return 0 if parsing the index partition criteria succeeds or a negative value if not.
*/
int index_criteria_parse(const char *filterrules, struct ltfs_volume *vol)
{
	const char *start = NULL, *end = NULL;
	struct index_criteria *ic;
	bool has_name = false, error = false;
	int ret = 0;
	CHECK_ARG_NULL(vol, -LTFS_NULL_ARG);
	/* no rule string at all: record "no criteria" and succeed */
	if (! filterrules) {
		vol->index->index_criteria.have_criteria = false;
		return 0;
	}
	/* drop any previously parsed criteria before re-populating */
	ic = &vol->index->index_criteria;
	index_criteria_free(ic);
	ic->have_criteria = true;
	/* reject rule strings containing options we do not understand */
	if (index_criteria_contains_invalid_options(filterrules)) {
		ltfsmsg(LTFS_ERR, 11152E);
		return -LTFS_POLICY_INVALID;
	}
	/* optional "name=" rule: parse the glob list into ic */
	if (index_criteria_find_option(filterrules, "name=", &start, &end, &error)) {
		ret = index_criteria_parse_name(start, end-start+1, ic);
		if (ret < 0) {
			ltfsmsg(LTFS_ERR, 11153E, ret);
			return ret;
		}
		has_name = true;
	} else if (error) {
		ltfsmsg(LTFS_ERR, 11154E);
		return -LTFS_POLICY_INVALID;
	}
	/* "size=" rule: required whenever a "name=" rule was given */
	ic->max_filesize_criteria = 0;
	if (index_criteria_find_option(filterrules, "size=", &start, &end, &error)) {
		ret = index_criteria_parse_size(start, end-start+1, ic);
		if (ret < 0) {
			ltfsmsg(LTFS_ERR, 11155E, ret);
			return ret;
		}
	} else if (error) {
		ltfsmsg(LTFS_ERR, 11156E);
		return -LTFS_POLICY_INVALID;
	} else if (has_name) {
		/* name= without size= is rejected */
		ltfsmsg(LTFS_ERR, 11157E);
		return -LTFS_POLICY_INVALID;
	}
	return ret;
}
/*
 * Release the memory held by a barycentric interpolant and reset it to an
 * empty, reusable state.
 *
 * The freed pointers are NULLed so a second bary_free() call is harmless
 * (free(NULL) is a no-op) and stale pointers cannot be dereferenced; the
 * original left them dangling after free.
 */
static void bary_free(barycentric_t *bary)
{
	bary->nn = 0;
	free(bary->z);
	free(bary->w);
	free(bary->f);
	bary->z = NULL;
	bary->w = NULL;
	bary->f = NULL;
}
/*
* Memory barriers to keep this state in sync are graciously provided by
* the page table locks, outside of which no page table modifications happen.
* The barriers below prevent the compiler from re-ordering the instructions
* around the memory barriers that are already present in the code.
*/
/*
 * Returns true if a TLB flush is pending for this mm.  Per the comment
 * above, synchronization is provided by the page table locks; barrier()
 * only prevents the compiler from reordering the read of
 * mm->tlb_flush_pending across this point.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
/* Wait for multiple requests to complete.
 *
 * First completes every request whose Quobyte event is already finished,
 * then polls the still-in-progress events round-robin until all complete
 * or (when timeout > 0) the timeout expires.  Returns MPI_SUCCESS or an
 * MPI error code if MPI_Grequest_complete failed.
 *
 * Fix: the original advanced the poll index with "if (i >= count) i = 0;
 * else i++;", which allowed i to reach count and read events[count] out
 * of bounds before wrapping; the modulo increment keeps i in [0, count).
 */
int ADIOI_QUOBYTEFS_aio_wait_fn(int count, void **array_of_states, double timeout,
                                MPI_Status * status)
{
    ADIOI_AIO_Request **aio_reqlist;
    struct quobyte_io_event **events =
        (struct quobyte_io_event **) ADIOI_Calloc(sizeof(struct quobyte_io_event *), count);
    int i = 0;
    int errcode = MPI_SUCCESS;
    int num_in_progress = 0;
    aio_reqlist = (ADIOI_AIO_Request **) array_of_states;
    /* Complete already-finished requests; remember the pending events. */
    while (i < count && aio_reqlist[i] != NULL) {
        struct quobyte_io_event *current_event = aio_reqlist[i]->qaiocbp;
        if (current_event->errorcode == EINPROGRESS) {
            events[i] = current_event;
            num_in_progress++;
        } else {
            errcode = MPI_Grequest_complete(aio_reqlist[i]->req);
        }
        i++;
    }
    /* Poll pending events round-robin; timeout <= 0 means wait forever. */
    i = 0;
    double start_time = MPI_Wtime();
    int no_timeout = timeout > 0 ? 0 : 1;
    while (num_in_progress > 0 && (no_timeout || MPI_Wtime() - start_time < timeout)) {
        if (events[i] != NULL && events[i]->errorcode != EINPROGRESS) {
            errcode = MPI_Grequest_complete(aio_reqlist[i]->req);
            events[i] = NULL;
            num_in_progress--;
        }
        i = (i + 1) % count;
    }
    ADIOI_Free(events);
    if (errcode != MPI_SUCCESS) {
        errcode = MPIO_Err_create_code(MPI_SUCCESS,
                                       MPIR_ERR_RECOVERABLE,
                                       "ADIOI_QUOBYTEFS_aio_wait_fn",
                                       __LINE__, MPI_ERR_IO, "**mpi_grequest_complete", 0);
    }
    return errcode;
}
/*
* Write the value specified in the device tree or board code into the optional
* 16 bit Driver Stage Register. This can be used to tune raise/fall times and
* drive strength of the DAT and CMD outputs. The actual meaning of a given
* value is hardware dependant.
* The presence of the DSR register can be determined from the CSD register,
* bit 76.
*/
int mmc_set_dsr(struct mmc_host *host)
{
struct mmc_command cmd = {};
cmd.opcode = MMC_SET_DSR;
cmd.arg = (host->dsr << 16) | 0xffff;
cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
} |
/*
 * syslog, vsyslog --
 *	print message on log file; output is intended for syslogd(8).
 *
 * Variadic front end that forwards to __vsyslog_chk.  The -1 second
 * argument is a checking-mode flag (presumably "no fortify checking" --
 * confirm against the __vsyslog_chk definition).
 */
void
__syslog(int pri, const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	__vsyslog_chk(pri, -1, fmt, ap);
	va_end(ap);
}
/**
 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
 * @ceq: ceq sc structure
 *
 * Delegates to the generic CQP completion poller for the CREATE_CEQ op.
 */
static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
{
	return i40iw_sc_poll_for_cqp_op_done(ceq->dev->cqp,
					     I40IW_CQP_OP_CREATE_CEQ, NULL);
}
/**
 * @brief Convert a raw BMI160 accelerometer sample for the Madgwick filter.
 * @param aRaw raw signed 16-bit accelerometer reading
 * @retval converted value, with the full 16-bit range mapped to +/-2.0
 */
static inline float convertRawAcceleration(int16_t aRaw)
{
    /* Scale so that +/-32768 counts correspond to +/-2.0. */
    return (float)((aRaw * 2.0) / 32768.0);
}
/**
 * @brief compares at most the first n bytes from str1 and str2.
 * @details Comparison stops at the first differing character, at a
 *          terminating NUL in str1, or after `limit` characters, whichever
 *          comes first.  A non-positive limit compares nothing and reports
 *          the strings equal, matching the standard strncmp contract for
 *          n == 0.  (The original pre-decremented the counter, so
 *          limit <= 0 wrapped into an effectively unbounded comparison.)
 * @param[in] str1 points to the first string
 * @param[in] str2 points to the second string
 * @param[in] limit denotes the number of characters to compare
 * @returns value < 0 then it indicates str1 is less than str2.
 * @returns value > 0 then it indicates str2 is less than str1.
 * @returns value = 0 then it indicates str1 is equal to str2 within the limit of characters
 */
int32_t StrNcmp(char * str1, char * str2, int32_t limit)
{
    if(limit <= 0)
        return(0);
    for(; ((--limit) && (*str1 == *str2)); ++str1, ++str2)
    {
        if(*str1 == '\0')
            return(0);
    }
    return(*str1 - *str2);
}
#include<stdio.h>
/*
 * Contest-style program.  Reads: total (number of stalls), herpos (the
 * player's 1-based position), amount (her money), then `total` prices
 * (0 apparently meaning "nothing for sale").  It prints a walking
 * distance, 10 units per stall, to the nearest stall whose price is
 * affordable (0 < price <= amount).
 * NOTE(review): the exact edge/tie-breaking behavior below is inferred
 * from the code, not from a problem statement -- verify before changing.
 */
int main()
{
    int total, herpos, amount;
    scanf("%d%d%d",&total,&herpos,&amount);
    int i=0, prizes[total];
    while(i<total){
        scanf("%d",&prizes[i]);
        i++;
    }
    //case01: she stands at the left end -- walk right, counting 10 per
    //stall, until the first affordable stall is reached
    if(herpos==1){
        int distance=0;
        i=0;
        while(i<total){
            if(prizes[i]>amount){
                distance=distance+10;
                i++;
            }else{
                if(prizes[i]==0){
                    distance=distance+10;
                    i++;
                }else{
                    break;
                }
            }
        }
        printf("%d",distance);
    }
    //case02: she stands at the right end -- symmetric scan to the left
    else if(herpos==total){
        int distance = 0;
        i=total-1;
        while(i>=0){
            if(prizes[i]>amount){
                distance=distance+10;
                i--;
            }else{
                if(prizes[i]==0){
                    distance=distance+10;
                    i--;
                }else{
                    break;
                }
            }
        }
        printf("%d",distance);
    }
    //case03: she stands somewhere in the middle -- compute the distance to
    //the nearest affordable stall on each side and print the smaller one
    else{
        //part01: scan the stalls left of her; after the loop, j holds the
        //number of steps from the last (i.e. nearest) affordable stall on
        //the left up to her position (0 if there is none)
        int distance1=0,j=0;
        i=0;
        while(i<herpos-1){
            if(prizes[i]<=amount && prizes[i]!=0){
                j=1;
                i++;
            }
            else{
                if(j>=1){
                    j++;
                    i++;
                }else{
                    j=0;
                    i++;
                }
            }
        }
        distance1=j*10;
        //part02: scan rightwards from her position; distance2 becomes the
        //distance (in units of 10) to the first affordable stall, or stays
        //0 if none exists on the right
        int distance2=0;
        i=herpos;
        j=0;
        while(i<total){
            if(prizes[i]>amount||prizes[i]==0){
                j=j+10;
                i++;
            }else{
                j=j+10;
                distance2=j;
                break;
            }
        }
        //if(i==total+1){j=0;}else{j=j-1;}
        //distance2=j*10;
        //pick the smaller positive distance; 0 means "no stall that side"
        if(distance2==0){
            printf("%d",distance1);
        }
        else if(distance1==0){
            printf("%d",distance2);
        }else if(distance1<distance2){
            printf("%d",distance1);
        }else{
            printf("%d",distance2);
        }
    }
    return 0;
}
|
/*
* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/rpm-smd-regulator.h>
#include <linux/clk/msm-clk.h>
#include "peripheral-loader.h"
#include "pil-q6v5.h"
/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET 0x014
#define QDSP6SS_GFMUX_CTL 0x020
#define QDSP6SS_PWR_CTL 0x030
#define QDSP6SS_STRAP_ACC 0x110
/* AXI Halt Register Offsets */
#define AXI_HALTREQ 0x0
#define AXI_HALTACK 0x4
#define AXI_IDLE 0x8
#define HALT_ACK_TIMEOUT_US 100000
/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE BIT(0)
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENA BIT(2)
/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENA BIT(1)
#define Q6SS_CLK_SRC_SEL_C BIT(3)
#define Q6SS_CLK_SRC_SEL_FIELD 0xC
#define Q6SS_CLK_SRC_SWITCH_CLK_OVR BIT(8)
/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
#define Q6SS_ETB_SLP_NRET_N BIT(17)
#define Q6SS_L2DATA_STBY_N BIT(18)
#define Q6SS_SLP_RET_N BIT(19)
#define Q6SS_CLAMP_IO BIT(20)
#define QDSS_BHS_ON BIT(21)
#define QDSS_LDO_BYP BIT(22)
/* QDSP6v55 parameters */
#define QDSP6v55_LDO_ON BIT(26)
#define QDSP6v55_LDO_BYP BIT(25)
#define QDSP6v55_BHS_ON BIT(24)
#define QDSP6v55_CLAMP_WL BIT(21)
#define QDSP6v55_CLAMP_QMC_MEM BIT(22)
#define L1IU_SLP_NRET_N BIT(15)
#define L1DU_SLP_NRET_N BIT(14)
#define L2PLRU_SLP_NRET_N BIT(13)
#define HALT_CHECK_MAX_LOOPS (200)
#define QDSP6SS_XO_CBCR (0x0038)
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
/*
 * Acquire the proxy votes the Hexagon core needs while booting: the XO
 * clock, an optional PNOC clock, the CX rail at super-turbo corner with a
 * 100 mA load vote, and an optional PLL supply.  On failure, everything
 * acquired so far is released in reverse order (goto-cleanup chain).
 * Returns 0 on success or a negative errno.
 */
int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
{
	int ret;
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	ret = clk_prepare_enable(drv->xo);
	if (ret) {
		dev_err(pil->dev, "Failed to vote for XO\n");
		goto out;
	}
	ret = clk_prepare_enable(drv->pnoc_clk);
	if (ret) {
		dev_err(pil->dev, "Failed to vote for pnoc\n");
		goto err_pnoc_vote;
	}
	/* raise CX to the super-turbo corner for the duration of the boot */
	ret = regulator_set_voltage(drv->vreg_cx,
				    RPM_REGULATOR_CORNER_SUPER_TURBO,
				    RPM_REGULATOR_CORNER_SUPER_TURBO);
	if (ret) {
		dev_err(pil->dev, "Failed to request vdd_cx voltage.\n");
		goto err_cx_voltage;
	}
	ret = regulator_set_optimum_mode(drv->vreg_cx, 100000);
	if (ret < 0) {
		dev_err(pil->dev, "Failed to set vdd_cx mode.\n");
		goto err_cx_mode;
	}
	ret = regulator_enable(drv->vreg_cx);
	if (ret) {
		dev_err(pil->dev, "Failed to vote for vdd_cx\n");
		goto err_cx_enable;
	}
	/* vreg_pll is optional; only vote for it when it was probed */
	if (drv->vreg_pll) {
		ret = regulator_enable(drv->vreg_pll);
		if (ret) {
			dev_err(pil->dev, "Failed to vote for vdd_pll\n");
			goto err_vreg_pll;
		}
	}
	return 0;
err_vreg_pll:
	regulator_disable(drv->vreg_cx);
err_cx_enable:
	regulator_set_optimum_mode(drv->vreg_cx, 0);
err_cx_mode:
	regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE,
			      RPM_REGULATOR_CORNER_SUPER_TURBO);
err_cx_voltage:
	clk_disable_unprepare(drv->pnoc_clk);
err_pnoc_vote:
	clk_disable_unprepare(drv->xo);
out:
	return ret;
}
EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
/*
 * Drop every vote taken by pil_q6v5_make_proxy_votes(): disable the
 * optional PLL supply, the CX rail (restoring corner/load votes), and the
 * XO and PNOC clocks.
 */
void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	if (drv->vreg_pll) {
		regulator_disable(drv->vreg_pll);
		regulator_set_optimum_mode(drv->vreg_pll, 0);
	}
	regulator_disable(drv->vreg_cx);
	regulator_set_optimum_mode(drv->vreg_cx, 0);
	regulator_set_voltage(drv->vreg_cx, RPM_REGULATOR_CORNER_NONE,
			      RPM_REGULATOR_CORNER_SUPER_TURBO);
	clk_disable_unprepare(drv->xo);
	clk_disable_unprepare(drv->pnoc_clk);
}
EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
/*
 * Halt the AXI port at halt_base: raise the halt request, poll (every
 * 50 us, up to HALT_ACK_TIMEOUT_US) for the acknowledge, warn if the port
 * never acked or did not report idle, then drop the request bit.
 */
void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
{
	int ret;
	u32 status;
	/* Assert halt request */
	writel_relaxed(1, halt_base + AXI_HALTREQ);
	/* Wait for halt */
	ret = readl_poll_timeout(halt_base + AXI_HALTACK,
		status, status != 0, 50, HALT_ACK_TIMEOUT_US);
	if (ret)
		dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
	else if (!readl_relaxed(halt_base + AXI_IDLE))
		dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
	/* Clear halt request (port will remain halted until reset) */
	writel_relaxed(0, halt_base + AXI_HALTREQ);
}
EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
/*
 * Power-down sequence for the QDSP6v5 core: gate the core clock, clamp
 * I/O, put the L2/ETB memories to sleep, assert the core and bus resets,
 * and finally kill power at the block head-switch.
 */
static void __pil_q6v5_shutdown(struct pil_desc *pil)
{
	u32 val;
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	/* Turn off core clock */
	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
	val &= ~Q6SS_CLK_ENA;
	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
	/* Clamp IO */
	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_CLAMP_IO;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	/* Turn off Q6 memories; val deliberately still holds the PWR_CTL
	 * contents (with CLAMP_IO set) read above -- no re-read needed. */
	val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
		Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
		Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
		Q6SS_L2DATA_STBY_N);
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	/* Assert Q6 resets */
	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
	/* Kill power at block headswitch */
	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
	val &= ~QDSS_BHS_ON;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
}
void pil_q6v5_shutdown(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	/*
	 * On QDSP6v55 the subsystem driver is expected to halt the bus and
	 * assert reset itself, so there is nothing to do here.
	 */
	if (!drv->qdsp6v55)
		__pil_q6v5_shutdown(pil);
}
EXPORT_SYMBOL(pil_q6v5_shutdown);
/*
 * Bring-up sequence for the QDSP6v5 core: assert resets with the core
 * stopped, enable the block head-switch, power the memories up (L2 banks
 * one at a time to limit inrush current), remove the I/O clamp, release
 * reset, enable -- and on v5.2.0 / v56 re-source -- the core clock, then
 * let the core run.  Always returns 0.
 */
static int __pil_q6v5_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	u32 val;
	/* Assert resets, stop core */
	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	mb();
	udelay(1);
	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	/* Bring core out of reset */
	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_CORE_ARES;
	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
	/* Turn on core clock */
	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
	val |= Q6SS_CLK_ENA;
	/* Need a different clock source for v5.2.0 */
	if (drv->qdsp6v5_2_0) {
		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
		val |= Q6SS_CLK_SRC_SEL_C;
	}
	/* force clock on during source switch */
	if (drv->qdsp6v56)
		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
	/* Start core execution */
	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_STOP_CORE;
	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
	return 0;
}
/*
 * Enable the QDSP6SS XO branch clock and wait for it to come on.
 * Returns 0 once bit 31 (CLK_OFF) of the CBCR deasserts, or -EINVAL after
 * HALT_CHECK_MAX_LOOPS polls of 1 us each.
 */
static int q6v55_branch_clk_enable(struct q6v5_data *drv)
{
	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
	u32 regval;
	u32 loops;

	/* Set the clock-enable bit. */
	regval = readl_relaxed(cbcr_reg);
	writel_relaxed(regval | 0x1, cbcr_reg);

	/* Poll until the clock-off status bit clears. */
	for (loops = 0; loops < HALT_CHECK_MAX_LOOPS; loops++) {
		regval = readl_relaxed(cbcr_reg);
		if (!(regval & BIT(31)))
			return 0;
		udelay(1);
	}

	dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
	return -EINVAL;
}
/*
 * Bring-up sequence for the QDSP6v55 core: optionally override the ACC
 * strap, assert resets with the core stopped, enable the XO branch clock
 * and the block head-switch, power the memories up one bank at a time
 * (to limit inrush current), remove the word-line / I/O / QMC clamps,
 * release reset and enable the core clock.  Returns 0 on success or the
 * error from enabling the XO branch clock.
 */
static int __pil_q6v55_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	u32 val;
	int i;
	/* Override the ACC value if required */
	if (drv->override_acc)
		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
				drv->reg_base + QDSP6SS_STRAP_ACC);
	/* Assert resets, stop core */
	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
	/* BHS require xo cbcr to be enabled */
	i = q6v55_branch_clk_enable(drv);
	if (i)
		return i;
	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
	val |= QDSP6v55_BHS_ON;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	mb();
	udelay(1);
	/* bypass the LDO once the head-switch is up */
	val |= QDSP6v55_LDO_BYP;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	if (drv->qdsp6v56_1_3) {
		/* Deassert memory peripheral sleep and L2 memory standby */
		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
		/* Turn on L1, L2 and ETB memories 1 at a time */
		for (i = 17; i >= 0; i--) {
			val |= BIT(i);
			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
			udelay(1);
		}
	} else {
		/* Turn on memories. */
		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
		val |= 0xFFF00;
		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
		/* Turn on L2 banks 1 at a time */
		for (i = 0; i <= 7; i++) {
			val |= BIT(i);
			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
		}
	}
	/* Remove word line clamp */
	val &= ~QDSP6v55_CLAMP_WL;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	/* Remove QMC_MEM clamp */
	val &= ~QDSP6v55_CLAMP_QMC_MEM;
	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
	/* Bring core out of reset */
	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
	val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
	/* Turn on core clock */
	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
	val |= Q6SS_CLK_ENA;
	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
	return 0;
}
int pil_q6v5_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);

	/* QDSP6v55 parts use their own bring-up sequence. */
	return drv->qdsp6v55 ? __pil_q6v55_reset(pil) : __pil_q6v5_reset(pil);
}
EXPORT_SYMBOL(pil_q6v5_reset);
/*
 * Probe-time setup shared by the Q6v5 PIL drivers: maps the QDSP6 and AXI
 * halt register regions, reads the firmware name and the hardware-variant
 * flags from the device tree, and acquires the clocks and regulators used
 * for proxy voting.  Returns the allocated q6v5_data on success or an
 * ERR_PTR on failure (all resources are devm-managed).
 */
struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
{
	struct q6v5_data *drv;
	struct resource *res;
	struct pil_desc *desc;
	int ret;
	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return ERR_PTR(-ENOMEM);
	/* map the QDSP6SS register block */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
	drv->reg_base = devm_request_and_ioremap(&pdev->dev, res);
	if (!drv->reg_base)
		return ERR_PTR(-ENOMEM);
	desc = &drv->desc;
	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
				      &desc->name);
	if (ret)
		return ERR_PTR(ret);
	desc->dev = &pdev->dev;
	/* femto-modem (v5.2.0) needs none of the halt/clock/regulator setup */
	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
						   "qcom,pil-femto-modem");
	if (drv->qdsp6v5_2_0)
		return drv;
	/* either a single combined halt base ... */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
	if (res) {
		drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
						  resource_size(res));
		if (!drv->axi_halt_base) {
			dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
			return ERR_PTR(-ENOMEM);
		}
	}
	/* ... or separate q6/modem/nc halt register regions */
	if (!drv->axi_halt_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "halt_q6");
		if (res) {
			drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
					res->start, resource_size(res));
			if (!drv->axi_halt_q6) {
				dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
				return ERR_PTR(-ENOMEM);
			}
		}
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "halt_modem");
		if (res) {
			drv->axi_halt_mss = devm_ioremap(&pdev->dev,
					res->start, resource_size(res));
			if (!drv->axi_halt_mss) {
				dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
				return ERR_PTR(-ENOMEM);
			}
		}
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "halt_nc");
		if (res) {
			drv->axi_halt_nc = devm_ioremap(&pdev->dev,
					res->start, resource_size(res));
			if (!drv->axi_halt_nc) {
				dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
				return ERR_PTR(-ENOMEM);
			}
		}
	}
	/* one complete set of halt bases is mandatory */
	if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
					&& drv->axi_halt_nc))) {
		dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
		return ERR_PTR(-EINVAL);
	}
	/* hardware-variant and behavior flags from the device tree */
	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
						"qcom,pil-q6v55-mss");
	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
						"qcom,pil-q6v56-mss");
	drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
						  "qcom,qdsp6v56-1-3");
	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
						   "qcom,mba-image-is-not-elf");
	drv->override_acc = of_property_read_bool(pdev->dev.of_node,
						  "qcom,override-acc");
	drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
						  "qcom,ahb-clk-vote");
	/* XO clock is required; PNOC is only used when the DT asks for it */
	drv->xo = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(drv->xo))
		return ERR_CAST(drv->xo);
	if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
		drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
		if (IS_ERR(drv->pnoc_clk))
			return ERR_CAST(drv->pnoc_clk);
	} else {
		drv->pnoc_clk = NULL;
	}
	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
	if (IS_ERR(drv->vreg_cx))
		return ERR_CAST(drv->vreg_cx);
	/* the PLL supply is optional; configure it only when present */
	drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
	if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
		int voltage;
		ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
					   &voltage);
		if (ret) {
			dev_err(&pdev->dev, "Failed to find vdd_pll voltage.\n");
			return ERR_PTR(ret);
		}
		ret = regulator_set_voltage(drv->vreg_pll, voltage, voltage);
		if (ret) {
			dev_err(&pdev->dev, "Failed to request vdd_pll voltage.\n");
			return ERR_PTR(ret);
		}
		ret = regulator_set_optimum_mode(drv->vreg_pll, 10000);
		if (ret < 0) {
			dev_err(&pdev->dev, "Failed to set vdd_pll mode.\n");
			return ERR_PTR(ret);
		}
	} else {
		drv->vreg_pll = NULL;
	}
	return drv;
}
EXPORT_SYMBOL(pil_q6v5_init);
|
#include "git-compat-util.h"
#include "config.h"
#include "run-command.h"
#include "write-or-die.h"
/*
* Some cases use stdio, but want to flush after the write
* to get error handling (and to get better interactive
* behaviour - not buffering excessively).
*
* Of course, if the flush happened within the write itself,
* we've already lost the error code, and cannot report it any
* more. So we just ignore that case instead (and hope we get
* the right error code on the flush).
*
* If the file handle is stdout, and stdout is a file, then skip the
* flush entirely since it's not needed.
*/
/*
 * Flush f, dying on error.  For stdout, the flush is skipped entirely when
 * stdout is a regular file (or GIT_FLUSH=0 is set), as described in the
 * comment above; the decision is cached in a static after the first call.
 */
void maybe_flush_or_die(FILE *f, const char *desc)
{
	static int skip_stdout_flush = -1;

	if (f == stdout) {
		if (skip_stdout_flush < 0) {
			/* NEEDSWORK: make this a normal Boolean */
			const char *cp = getenv("GIT_FLUSH");
			if (cp) {
				skip_stdout_flush = (atoi(cp) == 0);
			} else {
				struct stat st;
				skip_stdout_flush =
					(fstat(fileno(stdout), &st) == 0) &&
					S_ISREG(st.st_mode);
			}
		}
		if (skip_stdout_flush && !ferror(f))
			return;
	}
	if (fflush(f)) {
		check_pipe(errno);
		die_errno("write failure on '%s'", desc);
	}
}
/*
 * Like fprintf(), but die on a write error (after letting check_pipe()
 * handle SIGPIPE-style errno values).
 */
void fprintf_or_die(FILE *f, const char *fmt, ...)
{
	va_list ap;
	int ret;
	va_start(ap, fmt);
	ret = vfprintf(f, fmt, ap);
	va_end(ap);
	if (ret < 0) {
		check_pipe(errno);
		die_errno("write error");
	}
}
/*
 * Fsync fd if fsyncing is enabled (use_fsync, lazily initialized from
 * GIT_TEST_FSYNC).  Tries the cheaper writeout-only flush first when
 * fsync_method asks for it, falling back to a full hardware flush.
 * Returns 0 when fsyncing is disabled or succeeds, negative on failure.
 */
static int maybe_fsync(int fd)
{
	if (use_fsync < 0)
		use_fsync = git_env_bool("GIT_TEST_FSYNC", 1);
	if (!use_fsync)
		return 0;
	if (fsync_method == FSYNC_METHOD_WRITEOUT_ONLY &&
	    git_fsync(fd, FSYNC_WRITEOUT_ONLY) >= 0)
		return 0;
	return git_fsync(fd, FSYNC_HARDWARE_FLUSH);
}
/*
 * Fsync fd (subject to the fsync configuration in maybe_fsync()) and die
 * with `msg` in the error message if it fails.
 */
void fsync_or_die(int fd, const char *msg)
{
	if (maybe_fsync(fd) < 0)
		die_errno("fsync error on '%s'", msg);
}
/*
 * Fsync fd only when `component` is enabled in the fsync_components
 * bitmask; returns 0 otherwise, or maybe_fsync()'s result.
 */
int fsync_component(enum fsync_component component, int fd)
{
	if (!(fsync_components & component))
		return 0;
	return maybe_fsync(fd);
}
/*
 * Like fsync_or_die(), but only acts when `component` is enabled in the
 * fsync_components bitmask.
 */
void fsync_component_or_die(enum fsync_component component, int fd, const char *msg)
{
	if (fsync_components & component)
		fsync_or_die(fd, msg);
}
/*
 * Write the whole buffer to fd, dying on error (after letting
 * check_pipe() handle SIGPIPE-style errno values).
 */
void write_or_die(int fd, const void *buf, size_t count)
{
	if (write_in_full(fd, buf, count) < 0) {
		check_pipe(errno);
		die_errno("write error");
	}
}
/*
 * fwrite() the whole buffer to f, dying unless all `count` bytes were
 * accepted by the stdio layer.
 */
void fwrite_or_die(FILE *f, const void *buf, size_t count)
{
	if (fwrite(buf, 1, count, f) != count)
		die_errno("fwrite error");
}
/* Flush f, dying on any stdio error. */
void fflush_or_die(FILE *f)
{
	if (fflush(f))
		die_errno("fflush error");
}
|
/*
 * Set the sense-resistor value (expressed as conductance, in mhos) in the
 * DS2781 RSNSP register, then persist it to EEPROM.
 * Returns 0 on success or a negative error code from the write/save step.
 */
static int ds2781_set_sense_register(struct ds2781_device_info *dev_info,
	u8 conductance)
{
	int ret;
	ret = ds2781_write(dev_info, &conductance,
				DS2781_RSNSP, sizeof(u8));
	if (ret < 0)
		return ret;
	/* shadow the new value into EEPROM so it survives power loss */
	return ds2781_save_eeprom(dev_info, DS2781_RSNSP);
}
/**
 * Release the memory buffer a sparse matrix is holding
 *
 * @param mtx a pointer to a valid sparse matrix
 *
 * After `sptFreeSparseMatrix` returns, the matrix is uninitialized: its
 * index/value vectors are freed and its dimension and nonzero counters
 * are reset, so it must be re-initialized before any further use.
 */
void sptFreeSparseMatrix(sptSparseMatrix *mtx) {
    mtx->nrows = 0;
    mtx->ncols = 0;
    mtx->nnz = 0;
    sptFreeIndexVector(&mtx->rowind);
    sptFreeIndexVector(&mtx->colind);
    sptFreeValueVector(&mtx->values);
}
#include <stdio.h>
#include <string.h>
/*
 * Simulates a tiny shell supporting "pwd" and "cd <path>".  The current
 * directory is kept in `disp` as an absolute path with a trailing slash
 * (e.g. "/a/b/"); "cd" handles absolute paths, relative paths and ".."
 * components.
 *
 * Fixes over the original: fgets() is checked and the trailing-newline
 * strip is guarded (the last input line may lack '\n', which previously
 * chopped a real character), and a dead `strcat(buf, "/")` on the
 * already-consumed component buffer was removed.
 */
int main(void)
{
    char disp[10010], buf[4096];
    char cmd[4096];
    int n;
    int i, j, k;
    size_t len;
    scanf("%d", &n);
    getchar();
    disp[0] = '/';
    disp[1] = '\0';
    for (i = 0; i < n; i++){
        if (fgets(cmd, 4096, stdin) == NULL)
            break;
        len = strlen(cmd);
        if (len > 0 && cmd[len - 1] == '\n')
            cmd[len - 1] = '\0';
        if (strncmp(cmd, "pwd", 3) == 0){
            printf("%s\n", disp);
        }
        else {
            j = 3;                      /* skip the "cd " prefix */
            if (cmd[j] == '/'){
                /* absolute path: restart from the root */
                memset(disp, '\0', sizeof(disp));
                disp[0] = '/';
                j++;
            }
            while (cmd[j] != '\0'){
                /* extract the next path component into buf (with '/') */
                k = 0;
                memset(buf, '\0', sizeof(buf));
                while (cmd[j] != '/' && cmd[j] != '\0'){
                    buf[k] = cmd[j];
                    j++;
                    k++;
                }
                buf[k] = '/';
                if (cmd[j] != '\0'){
                    j++;
                }
                if (strcmp(buf, "../") == 0){
                    /* "..": drop the last component of disp */
                    disp[strlen(disp) - 1] = '\0';
                    k = strlen(disp) - 1;
                    while (disp[k] != '/'){
                        disp[k--] = '\0';
                    }
                }
                else {
                    strcat(disp, buf);
                }
            }
        }
    }
    return (0);
}
|
/**
 * Switch slave PCM if configuration changed
 *
 * @param amx: Amux master
 * @return: 0 if slave hasn't changed or switch succeed, negative number
 * otherwise (-ENODEV if the device was disconnected meanwhile).
 */
static int amux_switch(struct snd_pcm_amux *amx)
{
	int ret = -1;
	char card[CARD_NAMESZ];

	AMUX_DBG("%s: enter PCM(%p)\n", __func__, &amx->io);

	/*
	 * Rewind the config fd before reading.  The original called
	 * lseek(fd, SEEK_SET, 0) with offset and whence swapped -- it only
	 * worked by accident because SEEK_SET happens to be 0.
	 */
	lseek(amx->fd, 0, SEEK_SET);
	/* Take a shared lock; a writer holding it means "config in flux",
	 * so report "unchanged" and try again next time. */
	ret = flock(amx->fd, LOCK_SH | LOCK_NB);
	if((ret < 0) && (errno == EWOULDBLOCK)) {
		ret = 0;
		goto out;
	}
	if(ret < 0)
		goto out;
	ret = amux_read_pcm(amx, card, sizeof(card));
	flock(amx->fd, LOCK_UN);
	if(ret < 0) {
		perror("Cannot read");
		goto out;
	}
	ret = 0;
	/* nothing to do when the configured slave name is unchanged */
	if(strcmp(card, amx->sname) == 0)
		goto out;
	ret = amux_cfg_slave(amx, card);
out:
	if(amux_disconnected(amx)) {
		snd_pcm_ioplug_set_state(&amx->io, SND_PCM_STATE_DISCONNECTED);
		ret = -ENODEV;
	}
	return ret;
}
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "GLImage.h"
#include "caffe2/core/net.h"
#include "caffe2/core/predictor.h"
namespace caffe2 {
// Predictor variant that executes the net on the GPU via OpenGL.
// NOTE(review): behavior of use_texture_input is not visible here — presumably
// inputs arrive as GL textures instead of CPU tensors; confirm in the .cc.
class GLPredictor : public Predictor {
 public:
  GLPredictor(const NetDef& init_net,
              const NetDef& run_net,
              bool use_texture_input = false,
              Workspace* parent = nullptr);
  // Runs the net on GL image inputs; returns success. Output image vectors
  // are owned by the predictor's workspace (callers receive const pointers).
  template <class T>
  bool run(std::vector<GLImageVector<T>*>& inputs, std::vector<const GLImageVector<T>*>* outputs);
  ~GLPredictor();
};
} // namespace caffe2
|
/*
* Copyright (C) 2005 Stephen Rothwell IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/iseries/lpar_map.h>
/* The # is to stop gcc trying to make .text nonexecutable */
/*
 * Static hypervisor mapping table for legacy iSeries LPAR: describes the
 * ESID/VSID pairs and the initial page range the hypervisor must map for
 * the kernel before the MMU is fully up. Placed in .text because the
 * hypervisor reads it at a fixed, executable-segment offset.
 */
const struct LparMap __attribute__((__section__(".text #"))) xLparMap = {
	.xNumberEsids = HvEsidsToMap,
	.xNumberRanges = HvRangesToMap,
	.xSegmentTableOffs = STAB0_PAGE,
	/* One entry each for the kernel linear mapping and vmalloc space. */
	.xEsids = {
		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
		{ .xKernelEsid = GET_ESID(VMALLOC_START),
		  .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
	},
	/* Initial mapped range starts at offset 0 of the kernel segment. */
	.xRanges = {
		{ .xPages = HvPagesToMap,
		  .xOffset = 0,
		  .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
		},
	},
};
|
#include <unistd.h>
#include <assert.h>
#include <string.h>
#include <ctype.h>
#include "faidx.h"
#include "sam.h"
#include "kstring.h"
/*
 * Recompute the MD and NM auxiliary tags of alignment @b against reference
 * sequence @ref (NUL-terminated marks the reference boundary).
 *
 * @b         alignment record (modified in place: seq/qual and MD/NM tags)
 * @ref       reference bases for the alignment's target, 0-terminated
 * @is_equal  if non-zero, bases matching the reference are rewritten as '='
 * @max_nm    if >0 and the mismatch count reaches it, matching bases are
 *            masked to 'N' with quality 0 (presumed poor alignment)
 *
 * BUGFIX: the early return for unmapped reads (BAM_FUNMAP) leaked the
 * kstring buffer; it is now freed on that path as well.
 */
void bam_fillmd1_core(bam1_t *b, char *ref, int is_equal, int max_nm)
{
	uint8_t *seq = bam1_seq(b);
	uint32_t *cigar = bam1_cigar(b);
	bam1_core_t *c = &b->core;
	int i, x, y, u = 0; /* u counts consecutive matches for the MD run-length */
	kstring_t *str;
	uint8_t *old_md, *old_nm;
	int32_t old_nm_i = -1, nm = 0;
	str = (kstring_t*)calloc(1, sizeof(kstring_t)); /* NOTE(review): unchecked alloc, as elsewhere in this file */
	/* First pass: walk the CIGAR, building the MD string and counting NM. */
	for (i = y = 0, x = c->pos; i < c->n_cigar; ++i) {
		int j, l = cigar[i]>>4, op = cigar[i]&0xf;
		if (op == BAM_CMATCH) {
			for (j = 0; j < l; ++j) {
				int z = y + j;
				int c1 = bam1_seqi(seq, z), c2 = bam_nt16_table[(int)ref[x+j]];
				if (ref[x+j] == 0) break; // out of boundary
				if ((c1 == c2 && c1 != 15 && c2 != 15) || c1 == 0) { // a match
					if (is_equal) seq[z/2] &= (z&1)? 0xf0 : 0x0f; /* set 4-bit base to 0 => '=' */
					++u;
				} else {
					ksprintf(str, "%d", u);
					kputc(ref[x+j], str);
					u = 0; ++nm;
				}
			}
			if (j < l) break; /* ran off the reference */
			x += l; y += l;
		} else if (op == BAM_CDEL) {
			ksprintf(str, "%d", u);
			kputc('^', str); /* MD deletion marker */
			for (j = 0; j < l; ++j) {
				if (ref[x+j] == 0) break;
				kputc(ref[x+j], str);
			}
			u = 0;
			if (j < l) break;
			x += l; nm += l;
		} else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) {
			y += l; /* read-only ops: advance read coordinate */
			if (op == BAM_CINS) nm += l;
		} else if (op == BAM_CREF_SKIP) {
			x += l; /* reference-only op: advance reference coordinate */
		}
	}
	ksprintf(str, "%d", u); /* trailing match run */
	// apply max_nm: second pass masks matching bases of high-NM alignments
	if (max_nm > 0 && nm >= max_nm) {
		for (i = y = 0, x = c->pos; i < c->n_cigar; ++i) {
			int j, l = cigar[i]>>4, op = cigar[i]&0xf;
			if (op == BAM_CMATCH) {
				for (j = 0; j < l; ++j) {
					int z = y + j;
					int c1 = bam1_seqi(seq, z), c2 = bam_nt16_table[(int)ref[x+j]];
					if (ref[x+j] == 0) break; // out of boundary
					if ((c1 == c2 && c1 != 15 && c2 != 15) || c1 == 0) { // a match
						seq[z/2] |= (z&1)? 0x0f : 0xf0; /* set base to 15 => 'N' */
						bam1_qual(b)[z] = 0;
					}
				}
				if (j < l) break;
				x += l; y += l;
			} else if (op == BAM_CDEL || op == BAM_CREF_SKIP) x += l;
			else if (op == BAM_CINS || op == BAM_CSOFT_CLIP) y += l;
		}
	}
	// update NM
	old_nm = bam_aux_get(b, "NM");
	if (c->flag & BAM_FUNMAP) {
		free(str->s); free(str); /* was leaked on this early return */
		return;
	}
	if (old_nm) old_nm_i = bam_aux2i(old_nm);
	if (!old_nm) bam_aux_append(b, "NM", 'i', 4, (uint8_t*)&nm);
	else if (nm != old_nm_i) {
		fprintf(stderr, "[bam_fillmd1] different NM for read '%s': %d -> %d\n", bam1_qname(b), old_nm_i, nm);
		bam_aux_del(b, old_nm);
		bam_aux_append(b, "NM", 'i', 4, (uint8_t*)&nm);
	}
	// update MD: replace only if it differs (case-insensitively) from ours
	old_md = bam_aux_get(b, "MD");
	if (!old_md) bam_aux_append(b, "MD", 'Z', str->l + 1, (uint8_t*)str->s);
	else {
		int is_diff = 0;
		if (strlen((char*)old_md+1) == str->l) {
			for (i = 0; i < str->l; ++i)
				if (toupper(old_md[i+1]) != toupper(str->s[i]))
					break;
			if (i < str->l) is_diff = 1;
		} else is_diff = 1;
		if (is_diff) {
			fprintf(stderr, "[bam_fillmd1] different MD for read '%s': '%s' -> '%s'\n", bam1_qname(b), old_md+1, str->s);
			bam_aux_del(b, old_md);
			bam_aux_append(b, "MD", 'Z', str->l + 1, (uint8_t*)str->s);
		}
	}
	free(str->s); free(str);
}
/* Convenience wrapper around bam_fillmd1_core() with max_nm disabled (0),
 * i.e. recompute MD/NM without masking high-mismatch alignments. */
void bam_fillmd1(bam1_t *b, char *ref, int is_equal)
{
	bam_fillmd1_core(b, ref, is_equal, 0);
}
/*
 * "samtools fillmd" command entry point: streams alignments from
 * argv[optind], recomputes MD/NM against the FASTA in argv[optind+1],
 * and writes the result to stdout.
 *
 * Fixes vs. the original:
 *  - fai_load() failure is now detected (fai_fetch on a NULL faidx_t
 *    would have been the failure mode otherwise).
 *  - the -n option was accepted but missing from the usage text.
 */
int bam_fillmd(int argc, char *argv[])
{
	int c, is_equal = 0, tid = -2, ret, len, is_bam_out, is_sam_in, is_uncompressed, max_nm = 0;
	samfile_t *fp, *fpout = 0;
	faidx_t *fai;
	char *ref = 0, mode_w[8], mode_r[8];
	bam1_t *b;
	is_bam_out = is_sam_in = is_uncompressed = 0;
	mode_w[0] = mode_r[0] = 0;
	strcpy(mode_r, "r"); strcpy(mode_w, "w");
	while ((c = getopt(argc, argv, "eubSn:")) >= 0) {
		switch (c) {
		case 'e': is_equal = 1; break;
		case 'b': is_bam_out = 1; break;
		case 'u': is_uncompressed = is_bam_out = 1; break;
		case 'S': is_sam_in = 1; break;
		case 'n': max_nm = atoi(optarg); break;
		default: fprintf(stderr, "[bam_fillmd] unrecognized option '-%c'\n", c); return 1;
		}
	}
	/* Build samopen() mode strings from the flags. */
	if (!is_sam_in) strcat(mode_r, "b");
	if (is_bam_out) strcat(mode_w, "b");
	else strcat(mode_w, "h");
	if (is_uncompressed) strcat(mode_w, "u");
	if (optind + 1 >= argc) {
		fprintf(stderr, "\n");
		fprintf(stderr, "Usage: samtools fillmd [-eubS] <aln.bam> <ref.fasta>\n\n");
		fprintf(stderr, "Options: -e change identical bases to '='\n");
		fprintf(stderr, "         -u uncompressed BAM output (for piping)\n");
		fprintf(stderr, "         -b compressed BAM output\n");
		fprintf(stderr, "         -S the input is SAM with header\n");
		fprintf(stderr, "         -n INT mask matches to 'N' if #mismatches >= INT\n\n");
		return 1;
	}
	fp = samopen(argv[optind], mode_r, 0);
	if (fp == 0) return 1;
	if (is_sam_in && (fp->header == 0 || fp->header->n_targets == 0)) {
		fprintf(stderr, "[bam_fillmd] input SAM does not have header. Abort!\n");
		return 1;
	}
	fpout = samopen("-", mode_w, fp->header);
	fai = fai_load(argv[optind+1]);
	if (fai == 0) {
		fprintf(stderr, "[bam_fillmd] fail to load the FASTA index for '%s'. Abort!\n", argv[optind+1]);
		samclose(fp); samclose(fpout);
		return 1;
	}
	b = bam_init1();
	while ((ret = samread(fp, b)) >= 0) {
		if (b->core.tid >= 0) {
			/* Lazily (re)fetch the reference when the target changes. */
			if (tid != b->core.tid) {
				free(ref);
				ref = fai_fetch(fai, fp->header->target_name[b->core.tid], &len);
				tid = b->core.tid;
				if (ref == 0)
					fprintf(stderr, "[bam_fillmd] fail to find sequence '%s' in the reference.\n",
							fp->header->target_name[tid]);
			}
			if (ref) bam_fillmd1_core(b, ref, is_equal, max_nm);
		}
		samwrite(fpout, b);
	}
	bam_destroy1(b);
	free(ref);
	fai_destroy(fai);
	samclose(fp); samclose(fpout);
	return 0;
}
|
/*
 * The following code cannot be run from FLASH!
 *
 * Probe the flash chip at @addr with the Intel-style Read-ID command (0x90),
 * fill @info with manufacturer/device id, sector count and total size, and
 * return the size in bytes (0 if the chip is unrecognized). The device is
 * put back into read-array mode (0xFF) on every exit path.
 */
static ulong flash_get_size (vu_long *addr, flash_info_t *info)
{
	ushort value;
	vu_short *saddr = (vu_short *)addr;
	saddr[0] = 0x0090;		/* enter Read-ID mode */
	value = saddr[0];		/* word 0 = manufacturer id */
	switch (value) {
	case (AMD_MANUFACT & 0xFFFF):
		info->flash_id = FLASH_MAN_AMD;
		break;
	case (FUJ_MANUFACT & 0xFFFF):
		info->flash_id = FLASH_MAN_FUJ;
		break;
	case (SST_MANUFACT & 0xFFFF):
		info->flash_id = FLASH_MAN_SST;
		break;
	case (STM_MANUFACT & 0xFFFF):
		info->flash_id = FLASH_MAN_STM;
		break;
	case (MT_MANUFACT & 0xFFFF):
		info->flash_id = FLASH_MAN_MT;
		break;
	default:
		info->flash_id = FLASH_UNKNOWN;
		info->sector_count = 0;
		info->size = 0;
		saddr[0] = 0x00FF;	/* restore read mode */
		return (0);		/* no or unknown flash */
	}
	value = saddr[1];		/* word 1 = device id */
	switch (value) {
	case (AMD_ID_LV400T & 0xFFFF):
		info->flash_id += FLASH_AM400T;
		info->sector_count = 11;
		info->size = 0x00100000;
		break;				/* => 1 MB */
	case (AMD_ID_LV400B & 0xFFFF):
		info->flash_id += FLASH_AM400B;
		info->sector_count = 11;
		info->size = 0x00100000;
		break;				/* => 1 MB */
	case (AMD_ID_LV800T & 0xFFFF):
		info->flash_id += FLASH_AM800T;
		info->sector_count = 19;
		info->size = 0x00200000;
		break;				/* => 2 MB */
	case (AMD_ID_LV800B & 0xFFFF):
		info->flash_id += FLASH_AM800B;
		info->sector_count = 19;
		info->size = 0x00200000;
		break;				/* => 2 MB */
	case (AMD_ID_LV160T & 0xFFFF):
		info->flash_id += FLASH_AM160T;
		info->sector_count = 35;
		info->size = 0x00400000;
		break;				/* => 4 MB */
	case (AMD_ID_LV160B & 0xFFFF):
		info->flash_id += FLASH_AM160B;
		info->sector_count = 35;
		info->size = 0x00400000;
		break;				/* => 4 MB */
#if 0
	case (AMD_ID_LV320T & 0xFFFF):
		info->flash_id += FLASH_AM320T;
		info->sector_count = 67;
		info->size = 0x00800000;
		break;				/* => 8 MB */
	case (AMD_ID_LV320B & 0xFFFF):
		info->flash_id += FLASH_AM320B;
		info->sector_count = 67;
		info->size = 0x00800000;
		break;				/* => 8 MB */
#endif
	case (SST_ID_xF200A & 0xFFFF):
		info->flash_id += FLASH_SST200A;
		info->sector_count = 64;
		info->size = 0x00080000;
		break;
	case (SST_ID_xF400A & 0xFFFF):
		info->flash_id += FLASH_SST400A;
		info->sector_count = 128;
		info->size = 0x00100000;
		break;
	case (SST_ID_xF800A & 0xFFFF):
		info->flash_id += FLASH_SST800A;
		info->sector_count = 256;
		info->size = 0x00200000;
		break;
	case (STM_ID_x800AB & 0xFFFF):
		info->flash_id += FLASH_STM800AB;
		info->sector_count = 19;
		info->size = 0x00200000;
		break;
	case (MT_ID_28F400_T & 0xFFFF):
		info->flash_id += FLASH_28F400_T;
		info->sector_count = 7;
		info->size = 0x00080000;
		break;
	case (MT_ID_28F400_B & 0xFFFF):
		info->flash_id += FLASH_28F400_B;
		info->sector_count = 7;
		info->size = 0x00080000;
		break;
	default:
		info->flash_id = FLASH_UNKNOWN;
		saddr[0] = 0x00FF;	/* restore read mode */
		return (0);		/* => no or unknown flash */
	}
	/* Clamp to the board's configured sector-table size. */
	if (info->sector_count > CONFIG_SYS_MAX_FLASH_SECT) {
		printf ("** ERROR: sector count %d > max (%d) **\n",
			info->sector_count, CONFIG_SYS_MAX_FLASH_SECT);
		info->sector_count = CONFIG_SYS_MAX_FLASH_SECT;
	}
	saddr[0] = 0x00FF;		/* back to read-array mode */
	return (info->size);
}
/**
 * @brief find the body part in an HTTP message
 * @arg http_message: pointer to full HTTP message
 * @arg len: pointer to HTTP message length. It will contain body length in return.
 * @retval pointer to body in HTTP message, or NULL (with *len set to 0) when
 *         no header/body separator is present
 */
uint8_t * http_find_body(uint8_t * http_message, uint32_t *len)
{
    uint8_t *scan;
    /* Slide a 4-byte window over the message looking for the blank line
     * ("\r\n\r\n") that separates the headers from the body. */
    for (scan = http_message; *len >= 4; scan++, (*len)--) {
        if (scan[0] == '\r' && scan[1] == '\n' &&
            scan[2] == '\r' && scan[3] == '\n') {
            *len -= 4;      /* body length = bytes after the separator */
            return scan + 4;
        }
    }
    *len = 0;               /* separator never found */
    return NULL;
}
//*****************************************************************************
//
//! Receives a byte that has been sent to the SoftI2C module.
//!
//! \param psI2C specifies the SoftI2C data structure.
//!
//! This function reads a byte of data from the SoftI2C module that was
//! received as a result of an appropriate call to SoftI2CControl().
//!
//! \return Returns the byte received by the SoftI2C module, cast as an
//! uint32_t.
//
//*****************************************************************************
uint32_t
SoftI2CDataGet(tSoftI2C *psI2C)
{
    //
    // Read a byte.  (BUGFIX: this text previously appeared as a bare
    // statement without comment delimiters, which is a syntax error.)
    //
    return(psI2C->ui8Data);
}
// MidReturn workspace at ws+3328 length ws+8
// NOTE(review): machine-generated code (transpiler output). It calls
// f103_AllocateNewNode(out, 28) through a function-pointer round-trip,
// stores the returned handle into the workspace slot at ws+3328, then
// copies that slot into *p2908. Do not hand-edit; regenerate instead.
void f148_MidReturn(i8* p2908 ) {
    i1 v2909 = (i1)+28;                                     // constant argument to the allocator
    i8 v2910 = (i8)(intptr_t)(f103_AllocateNewNode);        // callee address as integer
    i8 v2911;
    ((void(*)(i8* , i1 ))(intptr_t)v2910)(&v2911, v2909);   // v2911 = AllocateNewNode(28)
    i8 v2912 = (i8)(intptr_t)(ws+3328);                     // workspace slot address
    *(i8*)(intptr_t)v2912 = v2911;
    endsub:;
    *p2908 = *(i8*)(intptr_t)(ws+3328);                     // return via out-parameter
}
/* Fill in the register cache *THIS_CACHE for THIS_FRAME for use
   in the stub unwinder.  The cache is built at most once per frame:
   a non-NULL *THIS_CACHE is returned as-is.  */
static struct trad_frame_cache *
nios2_stub_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  CORE_ADDR pc;
  CORE_ADDR start_addr;
  CORE_ADDR stack_addr;
  struct trad_frame_cache *this_trad_cache;
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  int num_regs = gdbarch_num_regs (gdbarch);
  if (*this_cache != NULL)
    return *this_cache;
  this_trad_cache = trad_frame_cache_zalloc (this_frame);
  *this_cache = this_trad_cache;
  /* In a stub frame the caller's PC lives in the return-address register.  */
  trad_frame_set_reg_realreg (this_trad_cache,
                              gdbarch_pc_regnum (gdbarch),
                              NIOS2_RA_REGNUM);
  /* Frame id = (function start, current stack pointer).  */
  pc = get_frame_pc (this_frame);
  find_pc_partial_function (pc, NULL, &start_addr, NULL);
  stack_addr = get_frame_register_unsigned (this_frame, NIOS2_SP_REGNUM);
  trad_frame_set_id (this_trad_cache, frame_id_build (start_addr, stack_addr));
  trad_frame_set_this_base (this_trad_cache, stack_addr);
  return this_trad_cache;
}
/* dissector_tables_dlg.c
* dissector_tables_dlg 2010 Anders Broman
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <string.h>
#include <epan/packet.h>
#include <gtk/gtk.h>
#include "ui/gtk/gui_utils.h"
#include "ui/gtk/dlg_utils.h"
#include "ui/gtk/dissector_tables_dlg.h"
/* Singleton handle for the dialog window; NULL while it is closed. */
static GtkWidget *dissector_tables_dlg_w = NULL;
/* The columns of every tree view in this dialog. */
enum
{
    TABLE_UI_NAME_COL,      /* human-readable name */
    TABLE_SHORT_NAME_COL,   /* internal table/protocol name */
    N_COLUMNS
};
static void
win_destroy_cb(GtkWindow *win _U_, gpointer data _U_)
{
    /* Tear down the singleton dialog and clear its handle so the next
     * menu invocation rebuilds it from scratch. */
    if (dissector_tables_dlg_w == NULL)
        return;
    window_destroy(dissector_tables_dlg_w);
    dissector_tables_dlg_w = NULL;
}
/*
 * For a dissector table, put
 * its short name and its
 * descriptive name in the treeview.
 */
/* Per-table cursor: the tree view being filled plus the parent row (iter)
 * and the most recently inserted child row (new_iter). */
struct dissector_tables_tree_info {
    GtkWidget *tree;
    GtkTreeIter iter;
    GtkTreeIter new_iter;
};
typedef struct dissector_tables_tree_info dissector_tables_tree_info_t;
/* GtkTreeSortable comparison callback: compares two rows by the string
 * column identified in @user_data.
 *
 * BUGFIX: gtk_tree_model_get() returns newly allocated copies of string
 * columns; the original leaked both copies on every comparison. */
static gint
ui_sort_func(GtkTreeModel *model,
             GtkTreeIter  *a,
             GtkTreeIter  *b,
             gpointer      user_data)
{
    gchar *stra, *strb;
    gint   result;
    /* The col to get data from is in userdata */
    gint data_column = GPOINTER_TO_INT(user_data);
    gtk_tree_model_get(model, a, data_column, &stra, -1);
    gtk_tree_model_get(model, b, data_column, &strb, -1);
    result = strcmp(stra, strb);
    g_free(stra);
    g_free(strb);
    return result;
}
/*
 * Struct to hold the pointer to the trees
 * for dissector tables.
 */
/* One tree view per notebook page, keyed by selector type. */
struct dissector_tables_trees {
    GtkWidget *str_tree_wgt;        /* string-keyed tables */
    GtkWidget *uint_tree_wgt;       /* integer-keyed tables */
    GtkWidget *custom_tree_wgt;     /* bytes/GUID-keyed tables */
    GtkWidget *heuristic_tree_wgt;  /* heuristic dissector lists */
};
typedef struct dissector_tables_trees dissector_tables_trees_t;
/* Append one (selector, protocol) child row under the current table's
 * parent row (tree_info->iter); the new row is left in tree_info->new_iter. */
static void
proto_add_to_list(dissector_tables_tree_info_t *tree_info,
                  GtkTreeStore *store,
                  const gchar  *str,
                  const gchar  *proto_name)
{
    gtk_tree_store_insert_with_values(store, &tree_info->new_iter, &tree_info->iter, G_MAXINT,
                                      TABLE_UI_NAME_COL,    str,
                                      TABLE_SHORT_NAME_COL, proto_name,
                                      -1);
}
/* dissector_table_foreach() callback: add one table entry (key -> dissector)
 * as a child row of the table's row.  @user_data is the tree_info cursor set
 * up by table_name_add_to_list(). */
static void
decode_proto_add_to_list (const gchar *table_name _U_, ftenum_t selector_type,
                          gpointer key, gpointer value, gpointer user_data)
{
    GtkTreeStore       *store;
    const gchar        *proto_name;
    dtbl_entry_t       *dtbl_entry;
    dissector_handle_t  handle;
    guint32             port;
    gchar              *int_str;
    const gchar        *dissector_name_str;
    dissector_tables_tree_info_t *tree_info;
    tree_info  = (dissector_tables_tree_info_t *)user_data;
    dtbl_entry = (dtbl_entry_t*)value;
    handle     = dtbl_entry_get_handle(dtbl_entry);
    proto_name = dissector_handle_get_short_name(handle);
    store = GTK_TREE_STORE(gtk_tree_view_get_model(GTK_TREE_VIEW(tree_info->tree)));
    /* Render the key according to the table's selector type. */
    switch (selector_type) {
    case FT_UINT8:
    case FT_UINT16:
    case FT_UINT24:
    case FT_UINT32:
        port = GPOINTER_TO_UINT(key);
        /* Hack: Use fixed width rj str so alpha sort (strcmp) will sort field numerically */
        int_str = g_strdup_printf ("%10d", port);
        proto_add_to_list(tree_info, store, int_str, proto_name);
        g_free (int_str);
        break;
    case FT_STRING:
    case FT_STRINGZ:
    case FT_UINT_STRING:
    case FT_STRINGZPAD:
        proto_add_to_list(tree_info, store, (const gchar*)key, proto_name);
        break;
    case FT_BYTES:
    case FT_GUID:
        /* Custom tables: keys are not printable; show the dissector name. */
        dissector_name_str = dissector_handle_get_dissector_name(handle);
        if (dissector_name_str == NULL)
            dissector_name_str = "<Unknown>";
        proto_add_to_list(tree_info, store, dissector_name_str, proto_name);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Insert a top-level row for one dissector table into @tree_view and
 * remember the view + parent row in @tree_info for later child inserts. */
static void
table_name_add_to_list(dissector_tables_tree_info_t *tree_info,
                       GtkWidget  *tree_view,
                       const char *table_name,
                       const char *ui_name)
{
    GtkTreeStore *tree_store;
    tree_info->tree = tree_view;
    tree_store = GTK_TREE_STORE(gtk_tree_view_get_model(GTK_TREE_VIEW(tree_view)));
    gtk_tree_store_insert_with_values(tree_store, &tree_info->iter, NULL, G_MAXINT,
                                      TABLE_UI_NAME_COL,    ui_name,
                                      TABLE_SHORT_NAME_COL, table_name,
                                      -1);
}
/* heur_dissector_table_foreach() callback: add one heuristic entry's
 * protocol (long + short name) under the current table row. */
static void
display_heur_dissector_table_entries(const char *table_name _U_,
                                     struct heur_dtbl_entry *dtbl_entry, gpointer user_data)
{
    dissector_tables_tree_info_t *tree_info = (dissector_tables_tree_info_t*)user_data;
    GtkTreeStore *store;
    if (dtbl_entry->protocol) {
        store = GTK_TREE_STORE(gtk_tree_view_get_model(GTK_TREE_VIEW(tree_info->tree)));  /* Get store */
        proto_add_to_list(tree_info, store,
                          (gchar *)proto_get_protocol_long_name(dtbl_entry->protocol),
                          proto_get_protocol_short_name(dtbl_entry->protocol));
    }else{
        g_warning("no protocol info");
    }
}
/* Callback for each heuristic dissector list: add a row for the table on the
 * "Heuristic tables" page, then a child row per entry.
 *
 * BUGFIX: tree_info is only needed while the rows are being inserted; the
 * original never freed it (its sibling display_dissector_table_names() does),
 * leaking one allocation per heuristic table. */
static void
display_heur_dissector_table_names(const char *table_name, struct heur_dissector_list *list, gpointer w)
{
    dissector_tables_trees_t     *dis_tbl_trees;
    dissector_tables_tree_info_t *tree_info;
    tree_info = g_new(dissector_tables_tree_info_t, 1);
    dis_tbl_trees = (dissector_tables_trees_t*)w;
    table_name_add_to_list(tree_info, dis_tbl_trees->heuristic_tree_wgt, "", table_name);
    if (list) {
        heur_dissector_table_foreach(table_name, display_heur_dissector_table_entries, tree_info);
    }
    g_free(tree_info);
}
/* dissector_all_tables_foreach_table() callback: route one table to the
 * notebook page matching its selector type, insert its row, then fill in
 * its entries via decode_proto_add_to_list(). */
static void
display_dissector_table_names(const char *table_name, const char *ui_name, void *w)
{
    dissector_tables_trees_t     *dis_tbl_trees;
    dissector_tables_tree_info_t *tree_info;
    ftenum_t                      selector_type = get_dissector_table_selector_type(table_name);
    tree_info = g_new(dissector_tables_tree_info_t, 1);
    dis_tbl_trees = (dissector_tables_trees_t*)w;
    switch (selector_type) {
    case FT_UINT8:
    case FT_UINT16:
    case FT_UINT24:
    case FT_UINT32:
        table_name_add_to_list(tree_info, dis_tbl_trees->uint_tree_wgt, table_name, ui_name);
        break;
    case FT_STRING:
    case FT_STRINGZ:
    case FT_UINT_STRING:
    case FT_STRINGZPAD:
        table_name_add_to_list(tree_info, dis_tbl_trees->str_tree_wgt, table_name, ui_name);
        break;
    case FT_BYTES:
    case FT_GUID:
        table_name_add_to_list(tree_info, dis_tbl_trees->custom_tree_wgt, table_name, ui_name);
        break;
    default:
        /* Tables with other selector types are simply not shown. */
        break;
    }
    dissector_table_foreach(table_name, decode_proto_add_to_list, tree_info);
    g_free(tree_info);
}
/* Create one two-column (UI name / short name) sortable tree view backed by
 * a fresh GtkTreeStore, and return the view widget. */
static GtkWidget *
init_table(void)
{
    GtkTreeStore      *store;
    GtkWidget         *tree;
    GtkTreeView       *tree_view;
    GtkTreeViewColumn *column;
    GtkCellRenderer   *renderer;
    GtkTreeSortable   *sortable;
    /* Create the store */
    store = gtk_tree_store_new (N_COLUMNS,      /* Total number of columns */
                                G_TYPE_STRING,  /* Table */
                                G_TYPE_STRING); /* Table */
    /* Create a view */
    tree = gtk_tree_view_new_with_model (GTK_TREE_MODEL (store));
    tree_view = GTK_TREE_VIEW(tree);
    sortable = GTK_TREE_SORTABLE(store);
    /* Speed up the list display */
    gtk_tree_view_set_fixed_height_mode(tree_view, TRUE);
    /* Setup the sortable columns */
    gtk_tree_view_set_headers_clickable(GTK_TREE_VIEW (tree), FALSE);
    /* The view now holds a reference. We can get rid of our own reference */
    g_object_unref (G_OBJECT (store));
    /* Create the first column, associating the "text" attribute of the
     * cell_renderer to the first column of the model */
    renderer = gtk_cell_renderer_text_new ();
    column = gtk_tree_view_column_new_with_attributes ("UI name", renderer, "text", TABLE_UI_NAME_COL, NULL);
    gtk_tree_sortable_set_sort_func(sortable, TABLE_UI_NAME_COL,
                                    ui_sort_func, GINT_TO_POINTER(TABLE_UI_NAME_COL), NULL);
    gtk_tree_view_column_set_sort_column_id(column, TABLE_UI_NAME_COL);
    gtk_tree_view_column_set_resizable(column, TRUE);
    gtk_tree_view_column_set_sizing(column, GTK_TREE_VIEW_COLUMN_FIXED);
    gtk_tree_view_column_set_min_width(column, 80);
    gtk_tree_view_column_set_fixed_width(column, 330);
    gtk_tree_view_append_column (GTK_TREE_VIEW (tree_view), column);
    /* Second column: the short (internal) name, same sort function. */
    renderer = gtk_cell_renderer_text_new ();
    column = gtk_tree_view_column_new_with_attributes ("Short name", renderer, "text", TABLE_SHORT_NAME_COL, NULL);
    gtk_tree_sortable_set_sort_func(sortable, TABLE_SHORT_NAME_COL,
                                    ui_sort_func, GINT_TO_POINTER(TABLE_SHORT_NAME_COL), NULL);
    gtk_tree_view_column_set_sort_column_id(column, TABLE_SHORT_NAME_COL);
    gtk_tree_view_column_set_resizable(column, TRUE);
    gtk_tree_view_column_set_sizing(column, GTK_TREE_VIEW_COLUMN_FIXED);
    gtk_tree_view_column_set_min_width(column, 80);
    gtk_tree_view_column_set_fixed_width(column, 100);
    gtk_tree_view_append_column (GTK_TREE_VIEW (tree_view), column);
    return tree;
}
/* Build and show the "Dissector tables" dialog: a notebook with one page
 * (string / integer / custom / heuristic) per selector-type class, then
 * populate every page from the registered dissector tables and finally
 * enable sorting (populating before sorting keeps insertion fast). */
static void
dissector_tables_dlg_init(void)
{
    dissector_tables_trees_t dis_tbl_trees;
    GtkWidget       *vbox;
    GtkWidget       *hbox;
    GtkWidget       *main_nb;
    GtkWidget       *scrolled_window;
    GtkTreeSortable *sortable;
    GtkWidget       *temp_page, *tmp;
    dissector_tables_dlg_w = dlg_window_new("Dissector tables");  /* transient_for top_level */
    gtk_window_set_destroy_with_parent (GTK_WINDOW(dissector_tables_dlg_w), TRUE);
    gtk_window_set_default_size(GTK_WINDOW(dissector_tables_dlg_w), 700, 300);
    vbox=ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 3, FALSE);
    gtk_container_add(GTK_CONTAINER(dissector_tables_dlg_w), vbox);
    gtk_container_set_border_width(GTK_CONTAINER(vbox), 12);
    main_nb = gtk_notebook_new();
    gtk_box_pack_start(GTK_BOX(vbox), main_nb, TRUE, TRUE, 0);
    /* String tables */
    temp_page = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 6, FALSE);
    tmp = gtk_label_new("String tables");
    gtk_widget_show(tmp);
    hbox = ws_gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 3, FALSE);
    gtk_box_pack_start(GTK_BOX (hbox), tmp, TRUE, TRUE, 0);
    gtk_notebook_append_page(GTK_NOTEBOOK(main_nb), temp_page, hbox);
    scrolled_window = scrolled_window_new(NULL, NULL);
    dis_tbl_trees.str_tree_wgt = init_table();
    gtk_widget_show(dis_tbl_trees.str_tree_wgt);
    gtk_container_add(GTK_CONTAINER(scrolled_window), dis_tbl_trees.str_tree_wgt);
    gtk_box_pack_start(GTK_BOX(temp_page), scrolled_window, TRUE, TRUE, 0);
    gtk_widget_show(scrolled_window);
    /* uint tables */
    temp_page = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 6, FALSE);
    tmp = gtk_label_new("Integer tables");
    gtk_widget_show(tmp);
    hbox = ws_gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 3, FALSE);
    gtk_box_pack_start(GTK_BOX (hbox), tmp, TRUE, TRUE, 0);
    gtk_notebook_append_page(GTK_NOTEBOOK(main_nb), temp_page, hbox);
    scrolled_window = scrolled_window_new(NULL, NULL);
    dis_tbl_trees.uint_tree_wgt = init_table();
    gtk_widget_show(dis_tbl_trees.uint_tree_wgt);
    gtk_container_add(GTK_CONTAINER(scrolled_window), dis_tbl_trees.uint_tree_wgt);
    gtk_box_pack_start(GTK_BOX(temp_page), scrolled_window, TRUE, TRUE, 0);
    gtk_widget_show(scrolled_window);
    /* custom tables */
    temp_page = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 6, FALSE);
    tmp = gtk_label_new("Custom tables");
    gtk_widget_show(tmp);
    hbox = ws_gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 3, FALSE);
    gtk_box_pack_start(GTK_BOX (hbox), tmp, TRUE, TRUE, 0);
    gtk_notebook_append_page(GTK_NOTEBOOK(main_nb), temp_page, hbox);
    scrolled_window = scrolled_window_new(NULL, NULL);
    dis_tbl_trees.custom_tree_wgt = init_table();
    gtk_widget_show(dis_tbl_trees.custom_tree_wgt);
    gtk_container_add(GTK_CONTAINER(scrolled_window), dis_tbl_trees.custom_tree_wgt);
    gtk_box_pack_start(GTK_BOX(temp_page), scrolled_window, TRUE, TRUE, 0);
    gtk_widget_show(scrolled_window);
    /* heuristic tables */
    temp_page = ws_gtk_box_new(GTK_ORIENTATION_VERTICAL, 6, FALSE);
    tmp = gtk_label_new("Heuristic tables");
    gtk_widget_show(tmp);
    hbox = ws_gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 3, FALSE);
    gtk_box_pack_start(GTK_BOX(hbox), tmp, TRUE, TRUE, 0);
    gtk_notebook_append_page(GTK_NOTEBOOK(main_nb), temp_page, hbox);
    scrolled_window = scrolled_window_new(NULL, NULL);
    dis_tbl_trees.heuristic_tree_wgt = init_table();
    gtk_widget_show(dis_tbl_trees.heuristic_tree_wgt);
    gtk_container_add(GTK_CONTAINER(scrolled_window), dis_tbl_trees.heuristic_tree_wgt);
    gtk_box_pack_start(GTK_BOX(temp_page), scrolled_window, TRUE, TRUE, 0);
    gtk_widget_show(scrolled_window);
    /* We must display TOP LEVEL Widget before calling init_table() */
    gtk_widget_show_all(dissector_tables_dlg_w);
    g_signal_connect(dissector_tables_dlg_w, "destroy", G_CALLBACK(win_destroy_cb), NULL);
    /* Fill the table with data */
    dissector_all_tables_foreach_table(display_dissector_table_names, &dis_tbl_trees, NULL);
    dissector_all_heur_tables_foreach_table(display_heur_dissector_table_names, (gpointer)&dis_tbl_trees, NULL);
    /* Turn on sorting only after all rows are inserted. */
    sortable = GTK_TREE_SORTABLE(gtk_tree_view_get_model(GTK_TREE_VIEW(dis_tbl_trees.str_tree_wgt)));
    gtk_tree_sortable_set_sort_column_id(sortable, TABLE_UI_NAME_COL, GTK_SORT_ASCENDING);
    sortable = GTK_TREE_SORTABLE(gtk_tree_view_get_model(GTK_TREE_VIEW(dis_tbl_trees.uint_tree_wgt)));
    gtk_tree_sortable_set_sort_column_id(sortable, TABLE_UI_NAME_COL, GTK_SORT_ASCENDING);
    sortable = GTK_TREE_SORTABLE(gtk_tree_view_get_model(GTK_TREE_VIEW(dis_tbl_trees.custom_tree_wgt)));
    gtk_tree_sortable_set_sort_column_id(sortable, TABLE_UI_NAME_COL, GTK_SORT_ASCENDING);
    sortable = GTK_TREE_SORTABLE(gtk_tree_view_get_model(GTK_TREE_VIEW(dis_tbl_trees.heuristic_tree_wgt)));
    gtk_tree_sortable_set_sort_column_id(sortable, TABLE_UI_NAME_COL, GTK_SORT_ASCENDING);
}
/* Menu callback: raise the existing dialog if open, else build a new one. */
void
dissector_tables_dlg_cb(GtkWidget *w _U_, gpointer d _U_)
{
    if (dissector_tables_dlg_w == NULL) {
        dissector_tables_dlg_init();
    } else {
        reactivate_window(dissector_tables_dlg_w);
    }
}
/*
* Editor modelines - http://www.wireshark.org/tools/modelines.html
*
* Local variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* vi: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/
|
/// vf CONFIGURE entry point for the ZRMJPEG filter
/**
 * \param vf video filter instance pointer
 * \param width image source width in pixels
 * \param height image source height in pixels
 * \param d_width width of requested window, just a hint
 * \param d_height height of requested window, just a hint
 * \param flags vf filter flags
 * \param outfmt
 *
 * \returns returns 0 on error
 *
 * This routine will make the necessary hardware-related decisions for
 * the ZRMJPEG filter, do the initialization of the MJPEG encoder, and
 * then select one of the ZRJMJPEGIT or ZRMJPEGNI filters and then
 * arrange to dispatch to the config() entry point for the one
 * selected.
 */
static int config(struct vf_instance *vf, int width, int height, int d_width,
		int d_height, unsigned int flags, unsigned int outfmt){
	struct vf_priv_s *priv = vf->priv;
	float aspect_decision;
	int stretchx, stretchy, err = 0, maxstretchx = 4;
	priv->fields = 1;
	VERBOSE("config() called\n");
	if (priv->j) {
		VERBOSE("re-configuring, resetting JPEG encoder\n");
		jpeg_enc_uninit(priv->j);
		priv->j = NULL;
	}
	/* Ratio of requested display aspect to source aspect; a value near 2
	 * means anamorphic content that can be corrected by doubling X. */
	aspect_decision = ((float)d_width/(float)d_height)/
		((float)width/(float)height);
	if (aspect_decision > 1.8 && aspect_decision < 2.2) {
		VERBOSE("should correct aspect by stretching x times 2, %d %d\n", 2*width, priv->maxwidth);
		if (2*width <= priv->maxwidth) {
			d_width = 2*width;
			d_height = height;
			maxstretchx = 2;
		} else {
			WARNING("unable to correct aspect by stretching, because resulting X will be too large, aspect correction by decimating y not yet implemented\n");
			d_width = width;
			d_height = height;
		}
	} else {
		d_width = width;
		d_height = height;
	}
	/* Pick the hardware stretch factors and field mode by how large the
	 * output is relative to the card's maximum. */
	if (d_width > priv->maxwidth/2 || height > priv->maxheight/2
			|| maxstretchx == 1) {
		stretchx = 1;
		stretchy = 1;
		priv->fields = 2;
		if (priv->vdec == 2) {
			priv->fields = 1;
		} else if (priv->vdec == 4) {
			priv->fields = 1;
			stretchy = 2;
		}
		if (priv->hdec > maxstretchx) {
			/* NOTE(review): this clamps only when priv->fd is SET, while
			 * the analogous vdec checks below clamp when fd is NOT set
			 * ("use fd to keep ..."). One of the two senses is presumably
			 * inverted — confirm against upstream vf_zrmjpeg.c before
			 * changing. */
			if (priv->fd) {
				WARNING("horizontal decimation too high, "
						"changing to %d (use fd to keep"
						" hdec=%d)\n",
						maxstretchx, priv->hdec);
				priv->hdec = maxstretchx;
			}
		}
		stretchx = priv->hdec;
	} else if (d_width > priv->maxwidth/4 ||
			height > priv->maxheight/4 ||
			maxstretchx == 2) {
		stretchx = 2;
		stretchy = 1;
		priv->fields = 1;
		if (priv->vdec == 2) {
			stretchy = 2;
		} else if (priv->vdec == 4) {
			if (!priv->fd) {
				WARNING("vertical decimation too high, "
						"changing to 2 (use fd to keep "
						"vdec=4)\n");
				priv->vdec = 2;
			}
			stretchy = 2;
		}
		if (priv->hdec == 2) {
			stretchx = 4;
		} else if (priv->hdec == 4) {
			/* NOTE(review): same suspected inversion as above. */
			if (priv->fd) {
				WARNING("horizontal decimation too high, "
						"changing to 2 (use fd to keep "
						"hdec=4)\n");
				priv->hdec = 2;
			}
			stretchx = 4;
		}
	} else {
		stretchx = 4;
		stretchy = 2;
		priv->fields = 1;
		if (priv->vdec != 1 && !priv->fd) {
			WARNING("vertical decimation too high, changing to 1 "
					"(use fd to keep vdec=%d)\n",
					priv->vdec);
			priv->vdec = 1;
		}
		if (priv->hdec != 1 && !priv->fd) {
			WARNING("horizontal decimation too high, changing to 1 (use fd to keep hdec=%d)\n", priv->hdec);
			priv->hdec = 1;
		}
	}
	VERBOSE("generated JPEG's %dx%s%d%s, stretched to %dx%d\n",
			width/priv->hdec, (priv->fields == 2) ? "(" : "",
			height/(priv->vdec*priv->fields),
			(priv->fields == 2) ? "x2)" : "",
			(width/priv->hdec)*stretchx,
			(height/(priv->vdec*priv->fields))*
			stretchy*priv->fields);
	/* Validate the final geometry against the hardware limits. */
	if ((width/priv->hdec)*stretchx > priv->maxwidth ||
			(height/(priv->vdec*priv->fields))*
			stretchy*priv->fields > priv->maxheight) {
		ERROR("output dimensions too large (%dx%d), max (%dx%d) "
				"insert crop to fix\n",
				(width/priv->hdec)*stretchx,
				(height/(priv->vdec*priv->fields))*
				stretchy*priv->fields,
				priv->maxwidth, priv->maxheight);
		err = 1;
	}
	if (width%(16*priv->hdec) != 0) {
		ERROR("width must be a multiple of 16*hdec (%d), use expand\n",
				priv->hdec*16);
		err = 1;
	}
	if (height%(8*priv->fields*priv->vdec) != 0) {
		ERROR("height must be a multiple of 8*fields*vdec (%d),"
				" use expand\n", priv->vdec*priv->fields*8);
		err = 1;
	}
	if (err) return 0;
	priv->y_stride = width;
	priv->c_stride = width/2;
	priv->j = jpeg_enc_init(width, height/priv->fields,
			priv->fields*priv->y_stride,
			priv->fields*priv->c_stride,
			priv->fields*priv->c_stride,
			1, priv->quality, priv->bw);
	if (!priv->j) return 0;
	return vf_next_config(vf, width, height, d_width, d_height, flags,
			(priv->fields == 2) ? IMGFMT_ZRMJPEGIT : IMGFMT_ZRMJPEGNI);
}
/**
 * system_health_monitor_probe() - Probe function to construct HMA info
 * @pdev: Platform device pointing to a device tree node.
 *
 * This function extracts the HMA information from the device tree, constructs
 * it and adds it to the global list.
 *
 * Return: 0 on success, standard Linux error codes on failure.
 */
static int system_health_monitor_probe(struct platform_device *pdev)
{
	int rc;
	struct hma_info *hma, *tmp_hma;
	struct device_node *node;
	mutex_lock(&hma_info_list_lock);
	/* One hma_info per child node of the device-tree entry. */
	for_each_child_of_node(pdev->dev.of_node, node) {
		hma = kzalloc(sizeof(*hma), GFP_KERNEL);
		if (!hma) {
			SHM_ERR("%s: Error allocation hma_info\n", __func__);
			rc = -ENOMEM;
			goto probe_err;
		}
		rc = parse_devicetree(node, hma);
		if (rc) {
			SHM_ERR("%s Failed to parse Device Tree\n", __func__);
			kfree(hma);
			goto probe_err;
		}
		init_srcu_struct(&hma->reset_srcu);
		/* Subscribe to subsystem-restart notifications for this HMA. */
		hma->restart_nb.notifier_call = restart_notifier_cb;
		hma->restart_nb_h = subsys_notif_register_notifier(
				hma->ssrestart_string, &hma->restart_nb);
		if (IS_ERR_OR_NULL(hma->restart_nb_h)) {
			cleanup_srcu_struct(&hma->reset_srcu);
			kfree(hma);
			rc = -EFAULT;
			SHM_ERR("%s: Error registering restart notif for %s\n",
				__func__, hma->ssrestart_string);
			goto probe_err;
		}
		list_add_tail(&hma->list, &hma_info_list);
		SHM_INFO("%s: Added HMA info for %s\n",
			 __func__, hma->subsys_name);
	}
	rc = start_system_health_monitor_service();
	if (rc) {
		SHM_ERR("%s Failed to start service %d\n", __func__, rc);
		goto probe_err;
	}
	mutex_unlock(&hma_info_list_lock);
	return 0;
probe_err:
	/* Unwinds EVERY entry on hma_info_list, not just the ones added in
	 * this call — presumably this probe is the sole producer of the
	 * global list, so that is the full set; confirm if a second probe
	 * instance is ever possible. */
	list_for_each_entry_safe(hma, tmp_hma, &hma_info_list, list) {
		list_del(&hma->list);
		subsys_notif_unregister_notifier(hma->restart_nb_h,
						 &hma->restart_nb);
		cleanup_srcu_struct(&hma->reset_srcu);
		kfree(hma);
	}
	mutex_unlock(&hma_info_list_lock);
	return rc;
}
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
/*
 * Reads batches of N bowlers ("id pin pin pin ..."), scores each ten-frame
 * game with standard strike/spare bonuses, and prints "id score" sorted by
 * score descending (ties broken by ascending id).
 *
 * Fix vs. the original: gets() (unbounded write; removed in C11) replaced
 * by fgets() with explicit newline stripping.
 */
typedef struct D{int i,s;}D;
D res[40];
int N,s[30],r,i,j,c,f;
char S[1000],*p;
/* Sort: score descending, then id ascending. */
int cmp(const void *a,const void *b)
{
	D*x=(D*)a,*y=(D*)b;
	return x->s==y->s?x->i-y->i:y->s-x->s;
}
int main()
{
	for(;scanf("%d\n",&N)==1&&N;)
	{
		for(j=0;j<N;j++)
		{
			if(fgets(S,sizeof S,stdin)==NULL)
				S[0]='\0';
			else
				S[strcspn(S,"\n")]='\0';
			p=strtok(S," ");
			res[j].i=p?atoi(p):0;
			for(c=0;(p=strtok(NULL," "))!=NULL;c++)
			{
				s[c]=atoi(p);
			}
			/* Score frame by frame; f is the 1-based frame number. */
			for(i=r=0,f=1;i<c;f++)
			{
				if(f==10)
				{
					/* 10th frame: all remaining rolls count at face value. */
					for(;i<c;i++)r+=s[i];
				}
				else if(s[i]==10)
				{
					/* Strike: 10 + next two rolls. */
					r+=s[i]+s[i+1]+s[i+2];
					i++;
				}
				else if(s[i]+s[i+1]==10)
				{
					/* Spare: 10 + next roll. */
					r+=10+s[i+2];
					i+=2;
				}
				else
				{
					r+=s[i]+s[i+1];
					i+=2;
				}
			}
			res[j].s=r;
		}
		qsort(res,N,sizeof(D),cmp);
		for(j=0;j<N;j++)
			printf("%d %d\n",res[j].i,res[j].s);
	}
	return 0;
}
/* dds_seq_insert -- Insert a value at the given position of the sequence. The
		     current sequence must already be large enough to be able to
		     do this.
   pos == 0 delegates to prepend; pos == _length delegates to append; an
   out-of-range pos (> _length) fails.  Otherwise the tail is shifted up one
   element slot and _esize bytes are copied from *value into the gap. */
DDS_ReturnCode_t dds_seq_insert (void *seq, unsigned pos, void *value)
{
	DDS_VoidSeq	*sp = (DDS_VoidSeq *) seq;
	char		*cp;
	int		ret;
	if (!pos)
		return (dds_seq_prepend (seq, value));
	if (pos > sp->_length)
		return (DDS_RETCODE_OUT_OF_RESOURCES);
	if (pos == sp->_length)
		return (dds_seq_append (seq, value));
	/* Grow the buffer by one element if it is full. */
	if (sp->_length == sp->_maximum) {
		ret = dds_seq_extend (seq, 1);
		if (ret)
			return (ret);
	}
	cp = (char *) sp->_buffer + sp->_esize * pos;
	/* memmove: source and destination overlap. */
	memmove (cp + sp->_esize, cp, (sp->_length - pos) * sp->_esize);
	memcpy (cp, value, sp->_esize);
	sp->_length++;
	return (DDS_RETCODE_OK);
}
/* Return this physical's view matrix. The parent chain is walked upward,
 * composing each node's model matrix with its parent's view matrix; the
 * walk stops when the parent is the original requestor (cycle guard) or
 * when there is no parent, in which case the view is the inverse of the
 * model matrix. */
float* alpha_Physical_GetViewMatrix(alpha_Physical* phys, void* requestor)
{
    float* parentView;
    float* modelMatrix;

    /* The first caller becomes the requestor for the whole traversal. */
    if(!requestor) {
        requestor = phys;
    }

    if(phys->parent && phys->parent != requestor) {
        /* Fetch the parent's view matrix; the parent may be either a
         * physical or a camera, each with its own accessor. */
        if(phys->parentType == alpha_PhysicalType_PHYSICAL) {
            parentView = alpha_Physical_GetViewMatrix(phys->parent, requestor);
        }
        else {
            parentView = alpha_Camera_GetViewMatrix(phys->parent, requestor);
        }
        /* Compose it with our own model matrix and cache the result. */
        phys->viewMatrix = alpha_RMatrix4_Multiplied(phys->pool,
            alpha_Physical_GetModelMatrix(phys), parentView);
        return phys->viewMatrix;
    }

    /* No parent (or we looped back to the requestor): the view matrix is
     * simply the inverse of the model matrix. */
    modelMatrix = alpha_Physical_GetModelMatrix(phys);
    return alpha_RMatrix4_Inversed(phys->pool, modelMatrix);
}
/*
 * NOTE: This is an internal header and installed for use by extensions. The
 * API is not guaranteed stable.
 */
// Internal definitions for the regex backend: a small "regex VM" (RVM) whose
// match ops consume input, and whose action ops emit instructions for a
// second "stack VM" (SVM) that later builds the parse result.
#ifndef HAMMER_BACKEND_REGEX__H
#define HAMMER_BACKEND_REGEX__H
#include <setjmp.h>
// each insn is an 8-bit opcode and a 16-bit parameter
// [a] are actions; they add an instruction to the stackvm that is being output.
// [m] are match ops; they can either succeed or fail, depending on the current character
// [c] are control ops. They affect the pc non-linearly.
typedef enum HRVMOp_ {
  RVM_ACCEPT, // [a]
  RVM_GOTO, // [c] parameter is an offset into the instruction table
  RVM_FORK, // [c] parameter is an offset into the instruction table
  RVM_PUSH, // [a] No arguments, just pushes a mark (pointer to some
            // character in the input string) onto the stack
  RVM_ACTION, // [a] argument is an action ID
  RVM_CAPTURE, // [a] Capture the last string (up to the current
               // position, non-inclusive), and push it on the
               // stack. No arg.
  RVM_EOF, // [m] Succeeds only if at EOF.
  RVM_MATCH, // [m] The high byte of the parameter is an upper bound
             // and the low byte is a lower bound, both
             // inclusive. An inverted match should be handled
             // as two ranges.
  RVM_STEP, // [a] Step to the next byte of input
  RVM_OPCOUNT // number of opcodes; not a real instruction
} HRVMOp;
// One regex-VM instruction: 8-bit opcode plus 16-bit argument
// (interpretation of the argument depends on the opcode, see above).
typedef struct HRVMInsn_{
  uint8_t op;
  uint16_t arg;
} HRVMInsn;
// Token type used for the input-position marks pushed by RVM_PUSH.
#define TT_MARK TT_RESERVED_1
// Execution state of the stack VM that replays the recorded actions.
typedef struct HSVMContext_ {
  HParsedToken **stack;
  size_t stack_count; // number of items on the stack. Thus stack[stack_count] is the first unused item on the stack.
  size_t stack_capacity;
} HSVMContext;
// These actions all assume that the items on the stack are not
// aliased anywhere.
typedef bool (*HSVMActionFunc)(HArena *arena, HSVMContext *ctx, void* env);
// An SVM action paired with the opaque environment passed to it.
typedef struct HSVMAction_ {
  HSVMActionFunc action;
  void* env;
} HSVMAction;
// A compiled regex program: the instruction table plus the action table.
// NOTE(review): `except` appears to be a longjmp target for aborting
// compilation (see <setjmp.h> include) -- confirm against the backend source.
struct HRVMProg_ {
  HAllocator *allocator;
  size_t length;
  size_t action_count;
  HRVMInsn *insns;
  HSVMAction *actions;
  jmp_buf except;
};
// Returns true IFF the provided parser could be compiled.
bool h_compile_regex(HRVMProg *prog, const HParser* parser);
// These functions are used by the compile_to_rvm method of HParser
// Registers an action and returns its ID (usable as an RVM_ACTION argument).
uint16_t h_rvm_create_action(HRVMProg *prog, HSVMActionFunc action_func, void* env);
// returns the address of the instruction just created
uint16_t h_rvm_insert_insn(HRVMProg *prog, HRVMOp op, uint16_t arg);
// returns the address of the next insn to be created.
uint16_t h_rvm_get_ip(HRVMProg *prog);
// Used to insert forward references; the idea is to generate a JUMP
// or FORK instruction with a target of 0, then update it once the
// correct target is known.
void h_rvm_patch_arg(HRVMProg *prog, uint16_t ip, uint16_t new_val);
// Common SVM action funcs...
bool h_svm_action_make_sequence(HArena *arena, HSVMContext *ctx, void* env);
bool h_svm_action_clear_to_mark(HArena *arena, HSVMContext *ctx, void* env);
extern HParserBackendVTable h__regex_backend_vtable;
#endif

/* Here's the actual meat of it */
void write_entry() {
vmu_pkg_t pkg;
uint8 data[4096], *pkg_out;
int pkg_size;
int i;
file_t f;
strcpy(pkg.desc_short, "VMU Test");
strcpy(pkg.desc_long, "This is a test VMU file");
strcpy(pkg.app_id, "KOS");
pkg.icon_cnt = 0;
pkg.icon_anim_speed = 0;
pkg.eyecatch_type = VMUPKG_EC_NONE;
pkg.data_len = 4096;
pkg.data = data;
for(i = 0; i < 4096; i++)
data[i] = i & 255;
vmu_pkg_build(&pkg, &pkg_out, &pkg_size);
fs_unlink("/vmu/a1/TESTFILE");
f = fs_open("/vmu/a1/TESTFILE", O_WRONLY);
if(!f) {
printf("error writing\n");
return;
}
fs_write(f, pkg_out, pkg_size);
fs_close(f);
} |
/* Determines if a given dot is a reserved corner dot
 * to be used by one of the last six bits. Returns 1 for a reserved
 * corner position, 0 otherwise.
 */
static int is_corner(const int column, const int row, const int width, const int height) {

    /* Top-left corner is always reserved. */
    if (column == 0 && row == 0) {
        return 1;
    }

    /* Top-right corner; its shape depends on the parity of the height. */
    if (height % 2) {
        if ((column == width - 2 && row == 0) || (column == width - 1 && row == 1)) {
            return 1;
        }
    } else if (column == width - 1 && row == 0) {
        return 1;
    }

    /* Bottom-left corner, again parity dependent. */
    if (height % 2) {
        if (column == 0 && row == height - 1) {
            return 1;
        }
    } else if ((column == 0 && row == height - 2) || (column == 1 && row == height - 1)) {
        return 1;
    }

    /* Bottom-right corner is the same for either parity. */
    if ((column == width - 2 && row == height - 1) || (column == width - 1 && row == height - 2)) {
        return 1;
    }

    return 0;
}
/*
* Copyright (C) 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef APISerializedScriptValue_h
#define APISerializedScriptValue_h
#include "APIObject.h"
#include "DataReference.h"
#include <WebCore/SerializedScriptValue.h>
#include <wtf/RefPtr.h>
#if USE(GLIB)
#include <wtf/glib/GRefPtr.h>
typedef struct _GVariant GVariant;
typedef struct _JSCContext JSCContext;
typedef struct _JSCValue JSCValue;
#endif
namespace API {
// Wraps a WebCore::SerializedScriptValue for the WebKit API layer.
// Thin adapter: all factories forward to WebCore, and the wire-bytes
// representation is exposed for IPC transport.
class SerializedScriptValue : public API::ObjectImpl<API::Object::Type::SerializedScriptValue> {
public:
// Adopt an already-serialized WebCore value.
static Ref<SerializedScriptValue> create(Ref<WebCore::SerializedScriptValue>&& serializedValue)
{
return adoptRef(*new SerializedScriptValue(WTFMove(serializedValue)));
}
// Serialize a live JS value; returns nullptr if serialization fails
// (the failure reason, if any, is reported through `exception`).
static RefPtr<SerializedScriptValue> create(JSContextRef context, JSValueRef value, JSValueRef* exception)
{
RefPtr<WebCore::SerializedScriptValue> serializedValue = WebCore::SerializedScriptValue::create(context, value, exception);
if (!serializedValue)
return nullptr;
return adoptRef(*new SerializedScriptValue(serializedValue.releaseNonNull()));
}
// Reconstitute a value from bytes previously produced by dataReference().
static Ref<SerializedScriptValue> createFromWireBytes(Vector<uint8_t>&& buffer)
{
return adoptRef(*new SerializedScriptValue(WebCore::SerializedScriptValue::createFromWireBytes(WTFMove(buffer))));
}
// Materialize the serialized value as a JS value in the given context.
JSValueRef deserialize(JSContextRef context, JSValueRef* exception)
{
return m_serializedScriptValue->deserialize(context, exception);
}
#if PLATFORM(COCOA) && defined(__OBJC__)
// Cocoa bridging: Objective-C object <-> serialized value.
static id deserialize(WebCore::SerializedScriptValue&, JSValueRef* exception);
static RefPtr<SerializedScriptValue> createFromNSObject(id);
#endif
#if USE(GLIB)
// GLib/JavaScriptCore-GLib bridging helpers.
static JSCContext* sharedJSCContext();
static GRefPtr<JSCValue> deserialize(WebCore::SerializedScriptValue&);
static RefPtr<SerializedScriptValue> createFromGVariant(GVariant*);
static RefPtr<SerializedScriptValue> createFromJSCValue(JSCValue*);
#endif
// Raw serialized bytes, suitable for sending over IPC.
IPC::DataReference dataReference() const { return m_serializedScriptValue->wireBytes(); }
WebCore::SerializedScriptValue& internalRepresentation() { return m_serializedScriptValue.get(); }
private:
explicit SerializedScriptValue(Ref<WebCore::SerializedScriptValue>&& serializedScriptValue)
: m_serializedScriptValue(WTFMove(serializedScriptValue))
{
}
Ref<WebCore::SerializedScriptValue> m_serializedScriptValue;
};
}
#endif
|
/*********************
 *
 * FUNCTION: FilterBufferCreate
 *
 * DESCRIPTION: Initialisation and memory allocation
 *              for LPC filter. Allocates the time, residuum and
 *              synthesis buffers plus the per-frame data array for a
 *              signal of lSignalSize samples. Any previous instance
 *              behind *phBuff is released first.
 *
 * RETURNS: FILTERBUFFER_OK on success, otherwise
 *          FILTERBUFFER_MEMORY_ALLOCATION_FAILED; on failure all
 *          partial allocations are released via FilterBufferDelete.
 *
 ***********************/
FILTERBUFFER_ERRORCODE FilterBufferCreate(FILTERBUFFER_HANDLE *phBuff,INT32 lSignalSize )
{
    INT32 lStepsize=40;     /* fixed analysis step size in samples */
    INT32 BufferDelay=0;
    FILTERBUFFER_ERRORCODE ErrorCode=FILTERBUFFER_OK;
    FILTERBUFFER_HANDLE PHand=*phBuff;

    /* Re-creation over a live handle: release the old instance first. */
    if(PHand != NULL)
    {
        FilterBufferDelete(&PHand);
    }
    if(PHand == NULL)
    {
        PHand = (FILTERBUFFER_HANDLE) calloc( 1,sizeof(FILTERBUFFER_DATA) );
        if( PHand == NULL )
        {
            ErrorCode=FILTERBUFFER_MEMORY_ALLOCATION_FAILED;
        }
    }
    if(ErrorCode==FILTERBUFFER_OK)
    {
        PHand->lStepsize=lStepsize;
        PHand->NrSamplesInBuffer=0;
        PHand->NrFramesInBuffer=0;
        PHand->lMaxNrFrames=lSignalSize/lStepsize;
        PHand->lTimeBufferSize=0;
        PHand->lResiduumBufferSize=0;

        /* Bug fix: these failure paths previously assigned
           LPCANALYSIS_MEMORY_ALLOCATION_FAILED, an error code from a
           different module's enum, to a FILTERBUFFER_ERRORCODE. */
        PHand->pdTimeBuffer=calloc(lSignalSize,sizeof(FLOAT));
        if(PHand->pdTimeBuffer==NULL)ErrorCode=FILTERBUFFER_MEMORY_ALLOCATION_FAILED;
        PHand->lTimeBufferSize=lSignalSize;

        PHand->pFrameData=calloc(PHand->lMaxNrFrames,sizeof(FRAME_DATA));
        if(PHand->pFrameData==NULL)ErrorCode=FILTERBUFFER_MEMORY_ALLOCATION_FAILED;

        PHand->pdResiduumBuffer=calloc(lSignalSize,sizeof(FLOAT));
        if(PHand->pdResiduumBuffer==NULL)ErrorCode=FILTERBUFFER_MEMORY_ALLOCATION_FAILED;
        PHand->lResiduumBufferSize=lSignalSize;

        PHand->pdSynthBuffer=calloc(lSignalSize,sizeof(FLOAT));
        if(PHand->pdSynthBuffer==NULL)ErrorCode=FILTERBUFFER_MEMORY_ALLOCATION_FAILED;
        PHand->lSynthBufferSize=lSignalSize;
    }
    if(ErrorCode==FILTERBUFFER_OK)
    {
        PHand->lBufferDelay=BufferDelay;
    }
    /* Any allocation failure: tear everything down again. */
    if(ErrorCode!=FILTERBUFFER_OK)
    {
        FilterBufferDelete(&PHand);
    }
    *phBuff=PHand;
    return ErrorCode;
}
/** \brief Set maximum number of elements
 *
 * This limit is used only if a discard policy has been set via
 * u_hmap_opts_set_policy(); otherwise the hmap is simply resized.
 *
 * \param opts  options object (must be non-NULL)
 * \param max   upper bound on the number of elements (must be > 0)
 *
 * \return U_HMAP_ERR_NONE on success, U_HMAP_ERR_FAIL on invalid arguments
 */
int u_hmap_opts_set_max (u_hmap_opts_t *opts, int max)
{
    /* dbg_err_if jumps to the err: label when the condition holds
       (libu convention; presumably also logs a debug message). */
    dbg_err_if (opts == NULL || max <= 0);
    opts->max = max;
    return U_HMAP_ERR_NONE;
err:
    return U_HMAP_ERR_FAIL;
}
/* parse_operands -- scan the operand list that follows <mnemonic> on the
   instruction line <l>, record each operand in the global instruction
   template (via maxq20_operand / i.operands / this_operand) and return
   the updated scan pointer, or NULL after reporting an error via as_bad. */
static char *
parse_operands (char *l, const char *mnemonic)
{
  char *token_start;
  short int expecting_operand = 0;	/* set after ','; another operand must follow */
  short int paren_not_balanced;		/* depth of unclosed '[' brackets */
  int operand_ok;
  /* NOTE(review): operand_ok is only initialised when mnemonic != NULL,
     but it is always assigned by maxq20_operand() before being read. */
  if (mnemonic)
    operand_ok = 0;
  while (*l != END_OF_INSN)
    {
      /* Skip at most one leading space before the operand. */
      if (is_space_char (*l))
	++l;
      if (!is_operand_char (*l) && *l != END_OF_INSN)
	{
	  as_bad (_("invalid character %c before operand %d"),
		  (char) (*l), i.operands + 1);
	  return NULL;
	}
      token_start = l;
      paren_not_balanced = 0;
      /* Consume one operand: everything up to an unbracketed ',' or the
	 end of the instruction. */
      while (paren_not_balanced || *l != ',')
	{
	  if (*l == END_OF_INSN)
	    {
	      if (paren_not_balanced)
		{
		  as_bad (_("unbalanced brackets in operand %d."),
			  i.operands + 1);
		  return NULL;
		}
	      break;
	    }
	  else if (!is_operand_char (*l) && !is_space_char (*l))
	    {
	      as_bad (_("invalid character %c in operand %d"),
		      (char) (*l), i.operands + 1);
	      return NULL;
	    }
	  /* Track '[' ... ']' nesting so commas inside memory operands
	     do not terminate the token. */
	  if (*l == '[')
	    ++paren_not_balanced;
	  if (*l == ']')
	    --paren_not_balanced;
	  l++;
	}
      if (l != token_start)
	{
	  /* Non-empty token: hand it to the MAXQ20 operand parser. */
	  this_operand = i.operands++;
	  if (i.operands > MAX_OPERANDS)
	    {
	      as_bad (_("spurious operands; (%d operands/instruction max)"),
		      MAX_OPERANDS);
	      return NULL;
	    }
	  /* Temporarily NUL-terminate the token for maxq20_operand. */
	  END_STRING_AND_SAVE (l);
	  operand_ok = maxq20_operand (token_start);
	  RESTORE_END_STRING (l);
	  if (!operand_ok)
	    return NULL;
	}
      else
	{
	  /* Empty token: only an error if a ',' promised an operand. */
	  if (expecting_operand)
	    {
	    expecting_operand_after_comma:
	      as_bad (_("expecting operand after ','; got nothing"));
	      return NULL;
	    }
	}
      if (*l == ',')
	{
	  /* A trailing comma at end of insn is an error too. */
	  if (*(++l) == END_OF_INSN)
	    goto expecting_operand_after_comma;
	  expecting_operand = 1;
	}
    }
  return l;
}
/*
 * Add the record (key, val) to the table
 * - key must be an array of n integers
 * - n must be no more than TUPLE_HMAP_MAX_ARITY
 * - there must not be a record with the same key in the table.
 */
void tuple_hmap_add(tuple_hmap_t *hmap, uint32_t n, int32_t key[], int32_t val) {
  tuple_hmap_rec_t *rec;
  uint32_t idx, hash, mask;

  assert(tuple_hmap_find(hmap, n, key) == NULL);

  hash = hash_tuple_key(n, key);
  rec = new_tuple_hmap_record(hash, n, key);
  rec->value = val;

  /* Linear probing: size is a power of two, so '& mask' wraps around. */
  mask = hmap->size - 1;
  for (idx = hash & mask; live_tuple_record(hmap->data[idx]); idx = (idx + 1) & mask) {
    /* skip over live records */
  }
  assert(hmap->data[idx] == NULL || hmap->data[idx] == TUPLE_HMAP_DELETED);

  /* Reusing a tombstone slot: one fewer deleted entry. */
  if (hmap->data[idx] == TUPLE_HMAP_DELETED) {
    hmap->ndeleted --;
  }
  hmap->data[idx] = rec;
  hmap->nelems ++;

  /* Grow the table once live + deleted entries exceed the threshold. */
  if (hmap->nelems + hmap->ndeleted > hmap->resize_threshold) {
    tuple_hmap_extend(hmap);
  }
}
/****************************************************************************
 Reply to a create temporary file.
 Builds a unique "TMXXXXXX" name under the requested directory, creates the
 file via mkstemp, re-opens it through the NT create path so it gets a real
 fsp, and returns the fnum plus the generated name to the client.
****************************************************************************/
int reply_ctemp(connection_struct *conn, char *inbuf,char *outbuf, int dum_size, int dum_buffsize)
{
	pstring fname;
	int outsize = 0;
	uint32 fattr = SVAL(inbuf,smb_vwv0);	/* requested DOS attributes */
	files_struct *fsp;
	int oplock_request = CORE_OPLOCK_REQUEST(inbuf);
	int tmpfd;
	SMB_STRUCT_STAT sbuf;
	char *p, *s;
	NTSTATUS status;
	unsigned int namelen;
	START_PROFILE(SMBctemp);
	/* The directory name follows the word block in the request buffer. */
	srvstr_get_path(inbuf, fname, smb_buf(inbuf)+1, sizeof(fname), 0, STR_TERMINATE, &status);
	if (!NT_STATUS_IS_OK(status)) {
		END_PROFILE(SMBctemp);
		return ERROR_NT(status);
	}
	/* Append the mkstemp template, with or without a path separator. */
	if (*fname) {
		pstrcat(fname,"/TMXXXXXX");
	} else {
		pstrcat(fname,"TMXXXXXX");
	}
	status = resolve_dfspath(conn, SVAL(inbuf,smb_flg2) & FLAGS2_DFS_PATHNAMES, fname);
	if (!NT_STATUS_IS_OK(status)) {
		END_PROFILE(SMBctemp);
		if (NT_STATUS_EQUAL(status,NT_STATUS_PATH_NOT_COVERED)) {
			return ERROR_BOTH(NT_STATUS_PATH_NOT_COVERED, ERRSRV, ERRbadpath);
		}
		return ERROR_NT(status);
	}
	status = unix_convert(conn, fname, False, NULL, &sbuf);
	if (!NT_STATUS_IS_OK(status)) {
		END_PROFILE(SMBctemp);
		return ERROR_NT(status);
	}
	status = check_name(conn, fname);
	if (!NT_STATUS_IS_OK(status)) {
		END_PROFILE(SMBctemp);
		return ERROR_NT(status);
	}
	/* mkstemp replaces the XXXXXX in fname and creates the file. */
	tmpfd = smb_mkstemp(fname);
	if (tmpfd == -1) {
		END_PROFILE(SMBctemp);
		return(UNIXERROR(ERRDOS,ERRnoaccess));
	}
	SMB_VFS_STAT(conn,fname,&sbuf);
	/* Re-open through the NT create path so the handle gets a proper
	   files_struct; the raw mkstemp fd is closed again below. */
	status = open_file_ntcreate(conn,fname,&sbuf,
		FILE_GENERIC_READ | FILE_GENERIC_WRITE,
		FILE_SHARE_READ|FILE_SHARE_WRITE,
		FILE_OPEN,
		0,
		fattr,
		oplock_request,
		NULL, &fsp);
	close(tmpfd);
	if (!NT_STATUS_IS_OK(status)) {
		END_PROFILE(SMBctemp);
		if (open_was_deferred(SVAL(inbuf,smb_mid))) {
			/* We have re-scheduled this call. */
			return -1;
		}
		return ERROR_OPEN(status);
	}
	outsize = set_message(outbuf,1,0,True);
	SSVAL(outbuf,smb_vwv0,fsp->fnum);
	/* Return only the generated filename, not the full path. */
	s = strrchr_m(fname, '/');
	if (!s) {
		s = fname;
	} else {
		s++;
	}
	p = smb_buf(outbuf);
#if 0
	/* Tested against win98 - it doesn't send this. - JRA */
	SSVALS(p, 0, -1);
#endif
	namelen = srvstr_push(outbuf, p, s, BUFFER_SIZE - (p - outbuf), STR_ASCII|STR_TERMINATE);
	p += namelen;
	outsize = set_message_end(outbuf, p);
	if (oplock_request && lp_fake_oplocks(SNUM(conn))) {
		SCVAL(outbuf,smb_flg,CVAL(outbuf,smb_flg)|CORE_OPLOCK_GRANTED);
	}
	if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type)) {
		SCVAL(outbuf,smb_flg,CVAL(outbuf,smb_flg)|CORE_OPLOCK_GRANTED);
	}
	DEBUG( 2, ( "reply_ctemp: created temp file %s\n", fname ) );
	DEBUG( 3, ( "reply_ctemp %s fd=%d umode=0%o\n", fname, fsp->fh->fd,
			(unsigned int)sbuf.st_mode ) );
	END_PROFILE(SMBctemp);
	return(outsize);
}
/**
 * Unregister a channel listener
 *
 * This function removes a channel listener from the global lists and maps
 * and is used when freeing a closed/errored channel listener.
 */
void
channel_listener_unregister(channel_listener_t *chan_l)
{
  int finished;

  tor_assert(chan_l);

  /* Nothing to do if it was never registered. */
  if (!(chan_l->registered))
    return;

  /* Closed/errored listeners live on the finished list; everything else
   * is on the active list. */
  finished = (chan_l->state == CHANNEL_LISTENER_STATE_CLOSED ||
              chan_l->state == CHANNEL_LISTENER_STATE_ERROR);
  if (finished) {
    if (finished_listeners)
      smartlist_remove(finished_listeners, chan_l);
  } else {
    if (active_listeners)
      smartlist_remove(active_listeners, chan_l);
  }

  /* Every registered listener is also on the all-listeners list. */
  if (all_listeners)
    smartlist_remove(all_listeners, chan_l);

  chan_l->registered = 0;
}
/**
 * \brief - return (list of) input port for the specified local port as provided to CGM block.
 * Currently,
 * CGM uses PP port as an input port, unless "overwrite pp_port by reassembly" is turned on,
 * in which case the valid reassembly contexts of the port are returned instead.
 *
 * Probably in future devices, CGM would use TM port as an input port
 *
 * \param [in]  unit        - unit number
 * \param [in]  local_port  - local logical port
 * \param [out] nof_entries - number of valid entries written to cgm_in_port
 * \param [out] cgm_in_port - CGM input port(s); caller must provide room for
 *                            DNX_DATA_MAX_INGR_REASSEMBLY_PRIORITY_CGM_PRIORITIES_NOF entries
 */
shr_error_e
dnx_cosq_cgm_in_port_get(
    int unit,
    bcm_port_t local_port,
    int *nof_entries,
    uint32 cgm_in_port[])
{
    int core_id;
    uint32 reassembly_context[DNX_DATA_MAX_INGR_REASSEMBLY_PRIORITY_CGM_PRIORITIES_NOF];
    int i;
    uint32 invalid_context;
    SHR_FUNC_INIT_VARS(unit);
    /* NOTE: SHR_IF_ERR_EXIT jumps to the 'exit' label on error (SDK macro). */
    if (dnx_data_ingr_congestion.config.feature_get(unit,
                                                    dnx_data_ingr_congestion_config_pp_port_by_reassembly_overwrite))
    {
        /* Reassembly-overwrite mode: collect every valid reassembly context
         * of the port; these are what CGM sees as input ports. */
        *nof_entries = 0;
        SHR_IF_ERR_EXIT(dnx_port_ingr_reassembly_context_for_cgm_get_all(unit, local_port, reassembly_context));
        invalid_context = dnx_data_ingr_reassembly.context.invalid_context_get(unit);
        for (i = 0; i < DNX_DATA_MAX_INGR_REASSEMBLY_PRIORITY_CGM_PRIORITIES_NOF; i++)
        {
            /* Skip non-ingress and invalid contexts. */
            if ((reassembly_context[i] != DNX_PORT_INGR_REASSEMBLY_NON_INGRESS_PORT_CONTEXT)
                && (reassembly_context[i] != invalid_context))
            {
                cgm_in_port[*nof_entries] = reassembly_context[i];
                (*nof_entries)++;
            }
        }
    }
    else
    {
        /* Default mode: the single PP port is the CGM input port. */
        SHR_IF_ERR_EXIT(dnx_algo_port_pp_port_get(unit, local_port, &core_id, &cgm_in_port[0]));
        *nof_entries = 1;
    }
exit:
    SHR_FUNC_EXIT;
}
/*
 * called to add packet node object to the existing packet list.
 * The child is appended at the tail; an empty list makes it the head.
 */
static void
add_to_packet_list(hash_obj_t *parent_obj, hash_obj_t *child_obj)
{
	hash_obj_t	*tail;

	/* Empty list: the child becomes the head. */
	if (parent_obj->u.seg_node->packet_list == NULL) {
		parent_obj->u.seg_node->packet_list = child_obj;
		return;
	}

	/* Walk to the last packet node and append the child there. */
	tail = parent_obj->u.seg_node->packet_list;
	while (tail->u.pkt_node->next != NULL)
		tail = tail->u.pkt_node->next;
	tail->u.pkt_node->next = child_obj;
}
/**
 * @ingroup sli
 * @brief Read the SLIPORT_STATUS register to check if the reset required is set.
 *
 * @param sli4 SLI context.
 *
 * @return
 * - 0 if call completed correctly and reset is not required.
 * - 1 if call completed and reset is required.
 * - -1 if call failed.
 */
int32_t sli_reset_required(sli4_t *sli4)
{
	uint32_t	status;

	/* iftype 0 (BE3/Skyhawk PF) has no reset-required indication. */
	if (sli_get_if_type(sli4) == SLI4_IF_TYPE_BE3_SKH_PF) {
		ocs_log_test(sli4->os, "reset required N/A for iftype 0\n");
		return 0;
	}

	status = sli_reg_read(sli4, SLI4_REG_SLIPORT_STATUS);
	if (status == UINT32_MAX) {
		/* All-ones read: register access failed. */
		ocs_log_err(sli4->os, "error reading SLIPORT_STATUS\n");
		return -1;
	}

	return (status & SLI4_PORT_STATUS_RN) ? 1 : 0;
}
/* reassemble.h
* Declarations of routines for {fragment,segment} reassembly
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* make sure that all flags that are set in a fragment entry is also set for
* the flags field of fd_head !!!
*/
#ifndef REASSEMBLE_H
#define REASSEMBLE_H
#include "ws_symbol_export.h"
/* only in fd_head: packet is defragmented */
#define FD_DEFRAGMENTED 0x0001
/* there are overlapping fragments */
#define FD_OVERLAP 0x0002
/* overlapping fragments contain different data */
#define FD_OVERLAPCONFLICT 0x0004
/* more than one fragment which indicates end-of data */
#define FD_MULTIPLETAILS 0x0008
/* fragment starts before the end of the datagram but extends
past the end of the datagram */
#define FD_TOOLONGFRAGMENT 0x0010
/* fragment tvb is subset, don't tvb_free() it */
#define FD_SUBSET_TVB 0x0020
/* this flag is used to request fragment_add to continue the reassembly process */
#define FD_PARTIAL_REASSEMBLY 0x0040
/* fragment offset is indicated by sequence number and not byte offset
into the defragmented packet */
#define FD_BLOCKSEQUENCE 0x0100
/* This flag is set in (only) fd_head to denote that datalen has been set to a valid value.
* It's implied by FD_DEFRAGMENTED (we must know the total length of the
* datagram if we have defragmented it...)
*/
#define FD_DATALEN_SET 0x0400
/* One fragment in a reassembly chain. The same structure doubles as the
 * chain head (fragment_head); several fields are only meaningful in the
 * head, as noted below. */
typedef struct _fragment_item {
	struct _fragment_item *next;
	guint32 frame;	 /* XXX - does this apply to reassembly heads? */
	guint32	offset;	 /* XXX - does this apply to reassembly heads? */
	guint32	len;	 /* XXX - does this apply to reassembly heads? */
	guint32 fragment_nr_offset; /**< offset for frame numbering, for sequences, where the
				 * provided fragment number of the first fragment does
				 * not start with 0
				 * XXX - does this apply only to reassembly heads? */
	guint32 datalen; /**< When flags&FD_BLOCKSEQUENCE is set, the
			  index of the last block (segments in
			  datagram + 1); otherwise the number of
			  bytes of the full datagram. Only valid in
			  the first item of the fragments list when
			  flags&FD_DATALEN is set.*/
	guint32 reassembled_in;	/**< frame where this PDU was reassembled,
				 only valid in the first item of the list
				 and when FD_DEFRAGMENTED is set*/
	guint8 reas_in_layer_num; /**< The current "depth" or layer number in the current frame where reassembly was completed.
				   * Example: in SCTP there can be several data chunks and we want the reassemblied tvb for the final
				   * segment only.
				   */
	guint32 flags;	/**< XXX - do some of these apply only to reassembly
			 heads and others only to fragments within
			 a reassembly? */
	tvbuff_t *tvb_data;
	/**
	 * Null if the reassembly had no error; non-null if it had
	 * an error, in which case it's the string for the error.
	 *
	 * XXX - this is wasted in all but the reassembly head; we
	 * should probably have separate data structures for a
	 * reassembly and for the fragments in a reassembly.
	 */
	const char *error;
} fragment_item, fragment_head;
/*
* Flags for fragment_add_seq_*
*/
/* we don't have any sequence numbers - fragments are assumed to appear in
* order */
#define REASSEMBLE_FLAGS_NO_FRAG_NUMBER 0x0001
/* a special fudge for the 802.11 dissector */
#define REASSEMBLE_FLAGS_802_11_HACK 0x0002
/*
* Generates a fragment identifier based on the given parameters. "data" is an
* opaque type whose interpretation is up to the caller of fragment_add*
* functions and the fragment key function (possibly NULL if you do not care).
*
* Keys returned by this function are only used within this packet scope.
*/
typedef gpointer (*fragment_temporary_key)(const packet_info *pinfo,
const guint32 id, const void *data);
/*
* Like fragment_temporary_key, but used for identifying reassembled fragments
* which may persist through multiple packets.
*/
typedef gpointer (*fragment_persistent_key)(const packet_info *pinfo,
const guint32 id, const void *data);
/*
 * Data structure to keep track of fragments and reassemblies.
 * fragment_table holds in-progress reassemblies (temporary keys);
 * reassembled_table holds completed ones (persistent keys).
 */
typedef struct {
	GHashTable *fragment_table;
	GHashTable *reassembled_table;
	fragment_temporary_key temporary_key_func;
	fragment_persistent_key persistent_key_func;
	GDestroyNotify free_temporary_key_func;		/* temporary key destruction function */
} reassembly_table;
/*
 * Table of functions for a reassembly table.
 * Passed to reassembly_table_init() to customize key handling.
 */
typedef struct {
	/* Functions for fragment table */
	GHashFunc hash_func;				/* hash function */
	GEqualFunc equal_func;				/* comparison function */
	fragment_temporary_key temporary_key_func;	/* temporary key creation function */
	fragment_persistent_key persistent_key_func;	/* persistent key creation function */
	GDestroyNotify free_temporary_key_func;		/* temporary key destruction function */
	GDestroyNotify free_persistent_key_func;	/* persistent key destruction function */
} reassembly_table_functions;
/*
* Tables of functions exported for the benefit of dissectors that
* don't need special items in their keys.
*/
WS_DLL_PUBLIC const reassembly_table_functions
addresses_reassembly_table_functions; /* keys have endpoint addresses and an ID */
WS_DLL_PUBLIC const reassembly_table_functions
addresses_ports_reassembly_table_functions; /* keys have endpoint addresses and ports and an ID */
/*
* Initialize/destroy a reassembly table.
*
* init: If table doesn't exist: create table;
* else: just remove any entries;
* destroy: remove entries and destroy table;
*/
WS_DLL_PUBLIC void
reassembly_table_init(reassembly_table *table,
const reassembly_table_functions *funcs);
WS_DLL_PUBLIC void
reassembly_table_destroy(reassembly_table *table);
/*
* This function adds a new fragment to the reassembly table
* If this is the first fragment seen for this datagram, a new entry
* is created in the table, otherwise this fragment is just added
* to the linked list of fragments for this packet.
* The list of fragments for a specific datagram is kept sorted for
* easier handling.
*
* Datagrams (messages) are identified by a key generated by
* fragment_temporary_key or fragment_persistent_key, based on the "pinfo", "id"
* and "data" pairs. (This is the sole purpose of "data".)
*
* Fragments are identified by "frag_offset".
*
* Returns a pointer to the head of the fragment data list if we have all the
* fragments, NULL otherwise. Note that the reassembled fragments list may have
* a non-zero fragment offset, the only guarantee is that no gaps exist within
* the list.
*/
WS_DLL_PUBLIC fragment_head *
fragment_add(reassembly_table *table, tvbuff_t *tvb, const int offset,
const packet_info *pinfo, const guint32 id, const void *data,
const guint32 frag_offset, const guint32 frag_data_len,
const gboolean more_frags);
/*
* Like fragment_add, except that the fragment may be added to multiple
* reassembly tables. This is needed when multiple protocol layers try
* to add the same packet to the reassembly table.
*/
WS_DLL_PUBLIC fragment_head *
fragment_add_multiple_ok(reassembly_table *table, tvbuff_t *tvb,
const int offset, const packet_info *pinfo,
const guint32 id, const void *data,
const guint32 frag_offset,
const guint32 frag_data_len,
const gboolean more_frags);
/*
* Like fragment_add, but maintains a table for completed reassemblies.
*
* If the packet was seen before, return the head of the fully reassembled
* fragments list (NULL if there was none).
*
* Otherwise (if reassembly was not possible before), try to to add the new
* fragment to the fragments table. If reassembly is now possible, remove all
* (reassembled) fragments from the fragments table and store it as a completed
* reassembly. The head of this reassembled fragments list is returned.
*
* Otherwise (if reassembly is still not possible after adding this fragment),
* return NULL.
*/
WS_DLL_PUBLIC fragment_head *
fragment_add_check(reassembly_table *table, tvbuff_t *tvb, const int offset,
const packet_info *pinfo, const guint32 id,
const void *data, const guint32 frag_offset,
const guint32 frag_data_len, const gboolean more_frags);
/*
* Like fragment_add, but fragments have a block sequence number starting from
* zero (for the first fragment of each datagram). This differs from
* fragment_add for which the fragment may start at any offset.
*
* If this is the first fragment seen for this datagram, a new
* "fragment_head" structure is allocated to refer to the reassembled
* packet, and:
*
* if "more_frags" is false, and either we have no sequence numbers, or
* are using the 802.11 hack (via fragment_add_seq_802_11), it is assumed that
* this is the only fragment in the datagram. The structure is not added to the
* hash table, and not given any fragments to refer to, but is just returned.
*
* In this latter case reassembly wasn't done (since there was only one
* fragment in the packet); dissectors can check the 'next' pointer on the
* returned list to see if this case was hit or not.
*
* Otherwise, this fragment is just added to the linked list of fragments
* for this packet; the fragment_item is also added to the fragment hash if
* necessary.
*
* If this packet completes assembly, these functions return the head of the
* fragment data; otherwise, they return null.
*/
WS_DLL_PUBLIC fragment_head *
fragment_add_seq(reassembly_table *table, tvbuff_t *tvb, const int offset,
const packet_info *pinfo, const guint32 id, const void *data,
const guint32 frag_number, const guint32 frag_data_len,
const gboolean more_frags, const guint32 flags);
/*
* Like fragment_add_seq, but maintains a table for completed reassemblies
* just like fragment_add_check.
*/
WS_DLL_PUBLIC fragment_head *
fragment_add_seq_check(reassembly_table *table, tvbuff_t *tvb, const int offset,
const packet_info *pinfo, const guint32 id,
const void *data,
const guint32 frag_number, const guint32 frag_data_len,
const gboolean more_frags);
/*
* Like fragment_add_seq_check, but immediately returns a fragment list for a
* new fragment. This is a workaround specific for the 802.11 dissector, do not
* use it elsewhere.
*/
WS_DLL_PUBLIC fragment_head *
fragment_add_seq_802_11(reassembly_table *table, tvbuff_t *tvb,
const int offset, const packet_info *pinfo,
const guint32 id, const void *data,
const guint32 frag_number, const guint32 frag_data_len,
const gboolean more_frags);
/*
* Like fragment_add_seq_check, but without explicit fragment number. Fragments
* are simply appended until no "more_frags" is false.
*/
WS_DLL_PUBLIC fragment_head *
fragment_add_seq_next(reassembly_table *table, tvbuff_t *tvb, const int offset,
const packet_info *pinfo, const guint32 id,
const void *data, const guint32 frag_data_len,
const gboolean more_frags);
/*
* Start a reassembly, expecting "tot_len" as the number of given fragments (not
* the number of bytes). Data can be added later using fragment_add_seq_check.
*/
WS_DLL_PUBLIC void
fragment_start_seq_check(reassembly_table *table, const packet_info *pinfo,
const guint32 id, const void *data,
const guint32 tot_len);
/*
* Mark end of reassembly and returns the reassembled fragment (if completed).
* Use it when fragments were added with "more_flags" set while you discovered
* that no more fragments have to be added.
* XXX rename to fragment_finish as it works also for fragment_add?
*/
WS_DLL_PUBLIC fragment_head *
fragment_end_seq_next(reassembly_table *table, const packet_info *pinfo,
const guint32 id, const void *data);
/* To specify the offset for the fragment numbering, the first fragment is added with 0, and
* afterwards this offset is set. All additional calls to off_seq_check will calculate
* the number in sequence in regards to the offset */
WS_DLL_PUBLIC void
fragment_add_seq_offset(reassembly_table *table, const packet_info *pinfo, const guint32 id,
const void *data, const guint32 fragment_offset);
/*
* Sets the expected index for the last block (for fragment_add_seq functions)
* or the expected number of bytes (for fragment_add functions). A reassembly
* must already have started.
*
* Note that for FD_BLOCKSEQUENCE tot_len is the index for the tail fragment.
* i.e. since the block numbers start at 0, if we specify tot_len==2, that
* actually means we want to defragment 3 blocks, block 0, 1 and 2.
*/
WS_DLL_PUBLIC void
fragment_set_tot_len(reassembly_table *table, const packet_info *pinfo,
const guint32 id, const void *data, const guint32 tot_len);
/*
* Return the expected index for the last block (for fragment_add_seq functions)
* or the expected number of bytes (for fragment_add functions).
*/
WS_DLL_PUBLIC guint32
fragment_get_tot_len(reassembly_table *table, const packet_info *pinfo,
const guint32 id, const void *data);
/*
* This function will set the partial reassembly flag(FD_PARTIAL_REASSEMBLY) for a fh.
* When this function is called, the fh MUST already exist, i.e.
* the fh MUST be created by the initial call to fragment_add() before
* this function is called. Also note that this function MUST be called to indicate
* a fh will be extended (increase the already stored data). After calling this function,
* and if FD_DEFRAGMENTED is set, the reassembly process will be continued.
*/
WS_DLL_PUBLIC void
fragment_set_partial_reassembly(reassembly_table *table,
const packet_info *pinfo, const guint32 id,
const void *data);
/* This function is used to check if there is partial or completed reassembly state
 * matching this packet, i.e. whether reassembly is going on for this packet or not.
*/
WS_DLL_PUBLIC fragment_head *
fragment_get(reassembly_table *table, const packet_info *pinfo,
const guint32 id, const void *data);
/* The same for the reassemble table */
/* id *must* be the frame number for this to work! */
WS_DLL_PUBLIC fragment_head *
fragment_get_reassembled(reassembly_table *table, const guint32 id);
WS_DLL_PUBLIC fragment_head *
fragment_get_reassembled_id(reassembly_table *table, const packet_info *pinfo,
const guint32 id);
/* This will free up all resources and delete reassembly state for this PDU.
* Except if the PDU is completely reassembled, then it would NOT deallocate the
* buffer holding the reassembled data but instead return the TVB
*
 * So, if you call fragment_delete and it returns non-NULL, YOU are responsible
 * for calling tvb_free() on the returned tvbuff.
*/
WS_DLL_PUBLIC tvbuff_t *
fragment_delete(reassembly_table *table, const packet_info *pinfo,
const guint32 id, const void *data);
/* This struct holds references to all the tree and field handles used when
 * displaying the reassembled fragment tree in the packet details view. A
 * dissector will populate this structure with its own tree and field handles
 * and then invoke show_fragment_tree to have those items added to the packet
 * details tree.
 */
typedef struct _fragment_items {
	gint       *ett_fragment;
	gint       *ett_fragments;

	int        *hf_fragments;                  /* FT_NONE     */
	int        *hf_fragment;                   /* FT_FRAMENUM */
	int        *hf_fragment_overlap;           /* FT_BOOLEAN  */
	int        *hf_fragment_overlap_conflict;  /* FT_BOOLEAN  */
	int        *hf_fragment_multiple_tails;    /* FT_BOOLEAN  */
	int        *hf_fragment_too_long_fragment; /* FT_BOOLEAN  */
	int        *hf_fragment_error;             /* FT_FRAMENUM */
	int        *hf_fragment_count;             /* FT_UINT32   */
	int        *hf_reassembled_in;             /* FT_FRAMENUM */
	int        *hf_reassembled_length;         /* FT_UINT32   */
	int        *hf_reassembled_data;           /* FT_BYTES    */

	const char *tag;  /* NOTE(review): presumably the label used for the
	                   * generated fragment items -- verify against
	                   * show_fragment_tree's usage. */
} fragment_items;
WS_DLL_PUBLIC tvbuff_t *
process_reassembled_data(tvbuff_t *tvb, const int offset, packet_info *pinfo,
const char *name, fragment_head *fd_head, const fragment_items *fit,
gboolean *update_col_infop, proto_tree *tree);
WS_DLL_PUBLIC gboolean
show_fragment_tree(fragment_head *ipfd_head, const fragment_items *fit,
proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, proto_item **fi);
WS_DLL_PUBLIC gboolean
show_fragment_seq_tree(fragment_head *ipfd_head, const fragment_items *fit,
proto_tree *tree, packet_info *pinfo, tvbuff_t *tvb, proto_item **fi);
#endif
|
/**
 * Decode the first two bytes of a buffer as a big-endian 16-bit value.
 * @param pByte byte array, most significant byte first
 * @param size  number of bytes available in pByte; must be >= 2
 * @result the decoded 16-bit unsigned integer
 */
inline uint16_t ConvertBytesToUint16(const uint8_t* pByte, size_t size)
{
    assert(size >= sizeof(uint16_t));
    const uint16_t hi = static_cast<uint16_t>(pByte[0]);
    const uint16_t lo = static_cast<uint16_t>(pByte[1]);
    return static_cast<uint16_t>((hi << 8) | lo);
}
/**
 * Note that a thread in the cleaner has finished whatever work it was doing
 * for the moment. This is used to maintain a distribution of the amount of
 * time the cleaner spends with various numbers of threads running.
 */
void
noteThreadStop()
{
    lock.lock();
    // Charge the elapsed cycles to the bucket for the current thread count,
    // then restart the cycle counter for the new (lower) activity level.
    activeTicks[activeThreads] += cycleCounter->stop();
    cycleCounter.construct();
    activeThreads--;
    lock.unlock();
}
/* Creates child to send OP_FOUND to all recent peers.
 *
 * The parent returns immediately: VEOK on a successful fork, VERROR if
 * fork() failed, or an error if a previous send_found() child is still
 * running.  All network work happens in the child, which exits when done.
 */
int send_found(void)
{
   word32 *ipp;
   NODE node;
   BTRAILER bt;
   char fname[128];
   int ecode;

   if(Sendfound_pid)
      return error("send_found() already running!");
   Sendfound_pid = fork();
   if(Sendfound_pid == -1) {
      Sendfound_pid = 0;   /* fork failed: clear the stale pid */
      return VERROR;
   }
   if(Sendfound_pid) return VEOK;   /* parent: child pid recorded, done */

   /* Child process from here on. */
   show("found_child");

   /* NOTE(review): when the low word of Cblocknum is 0, step back one
    * block and re-derive Cblockhash/Prevhash from that block's trailer
    * file on disk -- presumably a block-boundary special case; confirm
    * against the chain protocol documentation. */
   if(Cblocknum[0] == 0) {
      ecode = 1;
      if(sub64(Cblocknum, One, Cblocknum)) goto bad;
      sprintf(fname, "%s/b%s.bc", Bcdir, bnum2hex(Cblocknum));
      ecode = 2;
      if(readtrailer(&bt, fname) != VEOK
         || cmp64(Cblocknum, bt.bnum) != 0) {
bad:
         /* Any consistency failure is fatal for the child. */
         error("send_found(): ecode: %d", ecode);
         exit(1);
      }
      ecode = 3;
      if(memcmp(Prevhash, bt.bhash, HASHLEN)) goto bad;
      memcpy(Cblockhash, bt.bhash, HASHLEN);
      memcpy(Prevhash, bt.phash, HASHLEN);
   }
   if(Trace)
      plog("send_found(0x%s)", bnum2hex(Cblocknum));

   /* Contact recent peers in random order and announce the block. */
   shuffle32(Rplist, RPLISTLEN);
   for(ipp = Rplist; ipp < &Rplist[RPLISTLEN] && Running; ipp++) {
      if(*ipp == 0) continue;                        /* empty peer slot  */
      if(callserver(&node, *ipp) != VEOK) continue;  /* peer unreachable */
      send_op(&node, OP_FOUND);
      closesocket(node.sd);
   }
   exit(0);
}
#include <stdio.h>
#include <string.h>

/*
 * Reads one line (at most 100 characters) and selection-sorts the
 * characters sitting at EVEN indices, leaving the odd-index characters
 * in place, then prints the result.
 *
 * Fixes over the original:
 *  - gets() (unbounded, removed in C11) replaced by bounded fgets();
 *  - string.h included, so strlen() is properly declared;
 *  - the line length is hoisted out of the loop conditions;
 *  - `i < strlen(s)-1` underflowed (size_t) on an empty line, causing an
 *    effectively endless loop; `i + 1 < len` cannot underflow.
 */
int main(void)
{
    char s[102];

    if (fgets(s, sizeof s, stdin) == NULL)
        return 0;
    s[strcspn(s, "\n")] = '\0';   /* drop the trailing newline, if any */

    size_t len = strlen(s);       /* loop-invariant: compute once */
    for (size_t i = 0; i + 1 < len; i += 2) {
        /* Find the smallest character among even positions i, i+2, ... */
        size_t min_index = i;
        for (size_t j = i + 2; j < len; j += 2) {
            if (s[j] < s[min_index])
                min_index = j;
        }
        if (min_index != i) {
            char temp = s[i];
            s[i] = s[min_index];
            s[min_index] = temp;
        }
    }
    printf("%s\n", s);
    return 0;
}
|
/*
 * generator_free
 *		free the internal state of the generator
 *
 * Releases the generator internal state (pre-built combinations).
 */
static void
generator_free(CombinationGenerator *state)
{
	/* Free the owned array first, then the struct itself. */
	pfree(state->combinations);
	pfree(state);
}
/**
 * Propagate any changes from surfaces to texture.
 * pipe is optional context to inline the blit command in.
 */
void
svga_propagate_surface(struct svga_context *svga, struct pipe_surface *surf)
{
   struct svga_surface *s = svga_surface(surf);
   struct svga_texture *tex = svga_texture(surf->texture);
   struct svga_screen *ss = svga_screen(surf->texture->screen);
   unsigned zslice, face;

   /* Nothing was rendered to this surface since the last propagation. */
   if (!s->dirty)
      return;

   /* For cube maps the layer index selects the face; for all other
    * targets it selects the z slice. */
   if (surf->texture->target == PIPE_TEXTURE_CUBE) {
      zslice = 0;
      face = surf->u.tex.first_layer;
   }
   else {
      zslice = surf->u.tex.first_layer;
      face = 0;
   }

   s->dirty = FALSE;
   /* Bump the timestamps/ages so cached texture views get revalidated. */
   ss->texture_timestamp++;
   tex->view_age[surf->u.tex.level] = ++(tex->age);

   /* Only copy when the surface was rendered into its own backing
    * handle rather than directly into the texture. */
   if (s->handle != tex->handle) {
      SVGA_DBG(DEBUG_VIEWS,
               "svga: Surface propagate: tex %p, level %u, from %p\n",
               tex, surf->u.tex.level, surf);
      svga_texture_copy_handle(svga,
                               s->handle, 0, 0, 0, s->real_level, s->real_face,
                               tex->handle, 0, 0, zslice, surf->u.tex.level, face,
                               u_minify(tex->b.b.width0, surf->u.tex.level),
                               u_minify(tex->b.b.height0, surf->u.tex.level), 1);
      tex->defined[face][surf->u.tex.level] = TRUE;
   }
}
/** Return true iff we think our firewall will let us make an OR connection to
 * <b>node</b>. */
int
fascist_firewall_allows_node(const node_t *node)
{
  if (node->ri) {
    /* Full router descriptor available: check it directly. */
    return fascist_firewall_allows_or(node->ri);
  } else if (node->rs) {
    /* Only a routerstatus: build the address from its host-order IPv4
     * field and check address/port reachability. */
    tor_addr_t addr;
    tor_addr_from_ipv4h(&addr, node->rs->addr);
    return fascist_firewall_allows_address_or(&addr, node->rs->or_port);
  } else {
    /* No descriptor information at all: optimistically assume allowed. */
    return 1;
  }
}
#include <stdio.h>

/* Greatest common divisor of X and Y (Euclidean algorithm).
 * Y must be non-zero on entry, otherwise X % Y divides by zero. */
int f(int X, int Y)
{
    while (Y != 0) {
        int r = X % Y;
        X = Y;
        Y = r;
    }
    return X;
}
/* Two-player game: players 0 and 1 alternate, each subtracting
 * gcd(their number, n) from n.  Whoever drives n to exactly zero
 * wins; print the winner's index. */
int main(void)
{
    int a, b, n;
    scanf("%d%d%d", &a, &b, &n);

    int winner;
    for (;;) {
        n -= f(a, n);
        if (n == 0) { winner = 0; break; }
        n -= f(b, n);
        if (n == 0) { winner = 1; break; }
    }
    printf("%d\n", winner);
    return 0;
}
|
/**
 * Sets Matrix to translate by (dx, dy). Returned matrix is:
 *
 *      | 1 0 dx |
 *      | 0 1 dy |
 *      | 0 0  1 |
 *
 * @param dx  horizontal translation
 * @param dy  vertical translation
 * @return    Matrix with translation
 */
static Matrix MakeTrans(float dx, float dy) {
    Matrix translation = {};
    translation.setTranslate(dx, dy);
    return translation;
}
#include <stdio.h>

/*
 * Reads four integers and reports whether they can be split into two
 * equal pairs (e.g. the four sides of a rectangle): prints "yes" or "no".
 *
 * Fix: the original condition tested the pairing (0,3)/(1,2) twice and
 * never tested (0,2)/(1,3), so input such as "1 2 1 2" was wrongly
 * rejected.  All three distinct pairings are now covered.
 */
int main(void) {
    int e[4] = {0, 0, 0, 0};
    scanf("%d%d%d%d", &e[0], &e[1], &e[2], &e[3]);
    if ((e[0] == e[1] && e[2] == e[3]) ||
        (e[0] == e[2] && e[1] == e[3]) ||
        (e[0] == e[3] && e[1] == e[2])) {
        printf("yes\n");
    } else {
        printf("no\n");
    }
    return 0;
}
/*
* arch/arm/include/asm/mcpm.h
*
* Created by: Nicolas Pitre, April 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef MCPM_H
#define MCPM_H
/*
* Maximum number of possible clusters / CPUs per cluster.
*
* This should be sufficient for quite a while, while keeping the
* (assembly) code simpler. When this starts to grow then we'll have
* to consider dynamic allocation.
*/
#define MAX_CPUS_PER_CLUSTER 4
#define MAX_NR_CLUSTERS 2
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/cacheflush.h>
/*
* Platform specific code should use this symbol to set up secondary
* entry location for processors to use when released from reset.
*/
extern void mcpm_entry_point(void);
/*
* This is used to indicate where the given CPU from given cluster should
* branch once it is ready to re-enter the kernel using ptr, or NULL if it
* should be gated. A gated CPU is held in a WFE loop until its vector
* becomes non NULL.
*/
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
* This sets an early poke i.e a value to be poked into some address
* from very early assembly code before the CPU is ungated. The
* address must be physical, and if 0 then nothing will happen.
*/
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
unsigned long poke_phys_addr, unsigned long poke_val);
/*
* CPU/cluster power operations API for higher subsystems to use.
*/
/**
* mcpm_is_available - returns whether MCPM is initialized and available
*
* This returns true or false accordingly.
*/
bool mcpm_is_available(void);
/**
* mcpm_cpu_power_up - make given CPU in given cluster runable
*
* @cpu: CPU number within given cluster
* @cluster: cluster number for the CPU
*
* The identified CPU is brought out of reset. If the cluster was powered
* down then it is brought up as well, taking care not to let the other CPUs
* in the cluster run, and ensuring appropriate cluster setup.
*
* Caller must ensure the appropriate entry vector is initialized with
* mcpm_set_entry_vector() prior to calling this.
*
* This must be called in a sleepable context. However, the implementation
* is strongly encouraged to return early and let the operation happen
* asynchronously, especially when significant delays are expected.
*
* If the operation cannot be performed then an error code is returned.
*/
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
/**
* mcpm_cpu_power_down - power the calling CPU down
*
* The calling CPU is powered down.
*
* If this CPU is found to be the "last man standing" in the cluster
* then the cluster is prepared for power-down too.
*
* This must be called with interrupts disabled.
*
* On success this does not return. Re-entry in the kernel is expected
* via mcpm_entry_point.
*
* This will return if mcpm_platform_register() has not been called
* previously in which case the caller should take appropriate action.
*
* On success, the CPU is not guaranteed to be truly halted until
* mcpm_cpu_power_down_finish() subsequently returns non-zero for the
* specified cpu. Until then, other CPUs should make sure they do not
* trash memory the target CPU might be executing/accessing.
*/
void mcpm_cpu_power_down(void);
/**
* mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
* make sure it is powered off
*
* @cpu: CPU number within given cluster
* @cluster: cluster number for the CPU
*
* Call this function to ensure that a pending powerdown has taken
* effect and the CPU is safely parked before performing non-mcpm
* operations that may affect the CPU (such as kexec trashing the
* kernel text).
*
* It is *not* necessary to call this function if you only need to
* serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
* event.
*
* Do not call this function unless the specified CPU has already
* called mcpm_cpu_power_down() or has committed to doing so.
*
* @return:
* - zero if the CPU is in a safely parked state
* - nonzero otherwise (e.g., timeout)
*/
int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
/**
* mcpm_cpu_suspend - bring the calling CPU in a suspended state
*
* @expected_residency: duration in microseconds the CPU is expected
* to remain suspended, or 0 if unknown/infinity.
*
* The calling CPU is suspended. The expected residency argument is used
* as a hint by the platform specific backend to implement the appropriate
* sleep state level according to the knowledge it has on wake-up latency
* for the given hardware.
*
* If this CPU is found to be the "last man standing" in the cluster
* then the cluster may be prepared for power-down too, if the expected
* residency makes it worthwhile.
*
* This must be called with interrupts disabled.
*
* On success this does not return. Re-entry in the kernel is expected
* via mcpm_entry_point.
*
* This will return if mcpm_platform_register() has not been called
* previously in which case the caller should take appropriate action.
*/
void mcpm_cpu_suspend(u64 expected_residency);
/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
*
* This lets the platform specific backend code perform needed housekeeping
* work. This must be called by the newly activated CPU as soon as it is
* fully operational in kernel space, before it enables interrupts.
*
* If the operation cannot be performed then an error code is returned.
*/
int mcpm_cpu_powered_up(void);
/*
* Platform specific methods used in the implementation of the above API.
*/
struct mcpm_platform_ops {
	int (*power_up)(unsigned int cpu, unsigned int cluster);	/* see mcpm_cpu_power_up() */
	void (*power_down)(void);					/* see mcpm_cpu_power_down() */
	int (*power_down_finish)(unsigned int cpu, unsigned int cluster); /* see mcpm_cpu_power_down_finish() */
	void (*suspend)(u64);						/* see mcpm_cpu_suspend() */
	void (*powered_up)(void);					/* see mcpm_cpu_powered_up() */
};
/**
* mcpm_platform_register - register platform specific power methods
*
* @ops: mcpm_platform_ops structure to register
*
* An error is returned if the registration has been done previously.
*/
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
/* Synchronisation structures for coordinating safe cluster setup/teardown: */
/*
* When modifying this structure, make sure you update the MCPM_SYNC_ defines
* to match.
*/
struct mcpm_sync_struct {
	/* individual CPU states */
	struct {
		/* Each state byte is padded to its own cache-writeback
		 * granule so it can be cleaned/invalidated independently
		 * of its neighbours. */
		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
	} cpus[MAX_CPUS_PER_CLUSTER];

	/* cluster state */
	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

	/* inbound-side state */
	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};

/* One sync structure per possible cluster. */
struct sync_struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};
extern unsigned long sync_phys; /* physical address of *mcpm_sync */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
int __mcpm_cluster_state(unsigned int cluster);
int __init mcpm_sync_init(
void (*power_up_setup)(unsigned int affinity_level));
void __init mcpm_smp_set_ops(void);
#else
/*
* asm-offsets.h causes trouble when included in .c files, and cacheflush.h
* cannot be included in asm files. Let's work around the conflict like this.
*/
#include <asm/asm-offsets.h>
#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
#endif /* ! __ASSEMBLY__ */
/* Definitions for mcpm_sync_struct */
#define CPU_DOWN 0x11
#define CPU_COMING_UP 0x12
#define CPU_UP 0x13
#define CPU_GOING_DOWN 0x14
#define CLUSTER_DOWN 0x21
#define CLUSTER_UP 0x22
#define CLUSTER_GOING_DOWN 0x23
#define INBOUND_NOT_COMING_UP 0x31
#define INBOUND_COMING_UP 0x32
/*
* Offsets for the mcpm_sync_struct members, for use in asm.
* We don't want to make them global to the kernel via asm-offsets.c.
*/
#define MCPM_SYNC_CLUSTER_CPUS 0
#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
#define MCPM_SYNC_CLUSTER_CLUSTER \
(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
#define MCPM_SYNC_CLUSTER_INBOUND \
(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
#define MCPM_SYNC_CLUSTER_SIZE \
(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
#endif
|
#include <stdio.h>

int n;              /* number of elements */
int num[200100];    /* input values; a 0 marks an anchor */
int res[200100];    /* distance from each index to the nearest 0 */

/*
 * Starting at index i (where num[i] == 0): set res[i] = 0, walk
 * backwards lowering previously computed distances that this zero
 * improves on, then walk forwards assigning increasing distances
 * until the next zero (recursing on it) or the end of the array.
 *
 * Fix: the forward scan tested num[j] before checking j < n, reading
 * one element past the logical range when no further zero follows;
 * the bounds check now comes first.
 *
 * NOTE(review): the backward pass assumes res[i-1..] were filled by a
 * previous pass and that a zero with res == 0 lies at or before index
 * 0's fill origin, so j never goes below 0 -- holds for main()'s usage.
 */
void seek(int i)
{
    res[i] = 0;

    /* Backward pass: shrink distances that this zero improves on. */
    int t = 1, j = i - 1;
    while (t < res[j]) {
        res[j] = t;
        t++;
        j--;
    }

    /* Forward pass: assign distances up to the next zero or the end. */
    j = i + 1;
    t = 1;
    while (j < n && num[j] != 0) {   /* bounds check first: no OOB read */
        res[j] = t;
        t++;
        j++;
    }
    if (j != n)
        seek(j);
}
/*
 * Reads n values where each 0 is an anchor; prints, for every index,
 * the distance to the nearest zero.  The stretch before the first zero
 * and the stretch after it are filled here; later zeros are handled by
 * the recursive seek().
 *
 * Fixes: the fill loop tested num[i] before i < n (out-of-bounds read
 * when no zero follows); unused variable k removed.
 */
int main(void)
{
    scanf("%d", &n);
    for (int i = 0; i < n; i++)
        scanf("%d", &num[i]);

    int i;
    /* Find the first zero; distances before it grow away from it. */
    for (i = 0; i < n; i++) {
        if (num[i] == 0) {
            for (int j = 0, t = i; t >= 0; t--, j++)
                res[t] = j;
            i++;
            break;
        }
    }

    /* Fill forward from the first zero until the next zero (if any). */
    int t = 1;
    while (i < n && num[i] != 0) {   /* bounds check first: no OOB read */
        res[i] = t;
        t++;
        i++;
    }
    if (i != n)
        seek(i);

    for (i = 0; i < n; i++)
        printf("%d ", res[i]);
    return 0;
}
|
/*
* File: main.c
* Autor: Ricardo Alexander Bartra Quispe
* Codigo: 20176243
* Creado en: 9 de agosto de 2020, 08:36 AM
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * Minimum number of moves to turn a into b, where each move either adds a
 * positive odd number to a or subtracts a positive even number from a.
 *
 * Derivation (matches the original iterative logic exactly):
 *   a == b          -> 0 moves
 *   a <  b          -> 1 move if (b - a) is odd, else 2
 *   a >  b          -> 1 move if (a - b) is even, else 2
 *
 * Fix: the original looped up to three times and had no return statement
 * after the loop, so control could fall off the end of a non-void
 * function -- undefined behavior if that path were ever taken.
 */
int calcularMovMin(int a, int b) {
    if (a == b)
        return 0;
    if (a < b)
        return ((b - a) % 2 != 0) ? 1 : 2;
    return ((a - b) % 2 == 0) ? 1 : 2;
}
/* Reads t test cases of (a, b) and prints the minimum move count for
 * each.  NOTE(review): results are buffered in a fixed 10000-entry
 * array -- assumes t <= 10000; verify against the problem limits. */
int main(int argc, char** argv) {
    int test[10000]; /* result for each test case */
    int a,b;
    int t; /* number of test cases */
    scanf("%d",&t);
    for(int i=0;i<t;i++){
        scanf("%d %d",&a,&b);
        test[i]=calcularMovMin(a,b);
    }
    /* Print all answers after reading every case. */
    for(int i=0;i<t;i++){
        printf("%d\n",test[i]);
    }
    return (EXIT_SUCCESS);
}
|
// Guard-clause chain, reading top to bottom: 2 maps to 2, 3 is bumped
// to 4 first, 4 maps to 1, everything else maps to 0.
int early_returns(int a) {
    if (a == 2)
        return 2;
    if (a == 3)
        a += 1;
    return (a == 4) ? 1 : 0;
}
/*
 * Index of a 512 entry set of page pointers in the table, given a level.
 * Extracts the 9 index bits for pt_level from iova.
 */
static inline int ase_pt_idx(uint64_t iova, int pt_level)
{
    assert(pt_level <= 3);
    const uint64_t shifted = iova >> ase_pt_level_to_bit_idx(pt_level);
    return (int)(shifted & 0x1ff);
}
/*------------------------------------------------------------------------*
 *	usbd_set_alt_interface_index
 *
 * This function will select an alternate interface index for the
 * given interface index. The interface should not be in use when this
 * function is called. That means there should not be any open USB
 * transfers. Else an error is returned. If the alternate setting is
 * already set this function will simply return success. This function
 * is called in Host mode and Device mode!
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
usb_error_t
usbd_set_alt_interface_index(struct usb_device *udev,
    uint8_t iface_index, uint8_t alt_index)
{
	SDT_PROBE0(tpw, kernel, usb_device_usbd_set_alt_interface_index, entry);
	struct usb_interface *iface = usbd_get_iface(udev, iface_index);
	usb_error_t err;
	uint8_t do_unlock;

	/* Take the enumeration lock; remember whether we must release it. */
	do_unlock = usbd_enum_lock(udev);

	if (iface == NULL) {
		err = USB_ERR_INVAL;
		goto done;
	}
	/* Already on the requested alternate setting: nothing to do. */
	if (iface->alt_index == alt_index) {
		err = 0;
		goto done;
	}
#if USB_HAVE_UGEN
	/* Tear down generic FIFOs bound to this interface first. */
	usb_fifo_free_wrap(udev, iface_index, 0);
#endif
	/* Re-parse the configuration for the new alternate setting. */
	err = usb_config_parse(udev, iface_index, alt_index);
	if (err) {
		goto done;
	}
	/* Sanity check: the parse should have switched the alt index. */
	if (iface->alt_index != alt_index) {
		err = USB_ERR_INVAL;
		goto done;
	}
	/* Finally issue the SET_INTERFACE request to the device. */
	err = usbd_req_set_alt_interface_no(udev, NULL, iface_index,
	    iface->idesc->bAlternateSetting);
done:
	if (do_unlock)
	{
		usbd_enum_unlock(udev);
	}
	SDT_PROBE0(tpw, kernel, usb_device_usbd_set_alt_interface_index, return);
	return (err);
}
/*
 * dqueue_pop_back - Delete last element
 *
 * Removes the last element in the dqueue container,
 * effectively reducing its size by one.
 *
 * dqueue: dqueue
 *
 * Note: dlist_empty() on last node return true after this
 */
static inline void dqueue_pop_back(struct dqueue_t *dqueue)
{
	/* head.prev is the tail node; popping an empty queue is a no-op. */
	if(dqueue->size)
		dqueue_del(dqueue->head.prev, dqueue);
}
/* Set/clear bit 8 (Trap Flag) of the EFLAGS processor control
   register to enable/disable single-step mode.
   ENABLE is a boolean, indicating whether to set (1) the Trap Flag
   or clear it (0).
   Returns true on success, false if the thread's registers could not
   be read, written back, or have an unrecognized flavor. */
static bool modify_trace_bit(RzDebug *dbg, xnu_thread_t *th, int enable) {
	RZ_REG_T *state;
	int ret;
	/* Fetch the thread's current general-purpose register state. */
	ret = xnu_thread_get_gpr(dbg, th);
	if (!ret) {
		eprintf("error to get gpr registers in trace bit intel\n");
		return false;
	}
	state = (RZ_REG_T *)&th->gpr;
	/* 0x100 is the Trap Flag (bit 8): clear it, then OR it back in
	 * when single-stepping is being enabled. */
	if (state->tsh.flavor == x86_THREAD_STATE32) {
		state->uts.ts32.__eflags = (state->uts.ts32.__eflags &
						   ~0x100UL) |
			(enable ? 0x100UL : 0);
	} else if (state->tsh.flavor == x86_THREAD_STATE64) {
		state->uts.ts64.__rflags = (state->uts.ts64.__rflags &
						   ~0x100UL) |
			(enable ? 0x100UL : 0);
	} else {
		eprintf("Invalid bit size\n");
		return false;
	}
	/* Write the modified register state back to the thread. */
	if (!xnu_thread_set_gpr(dbg, th)) {
		eprintf("error xnu_thread_set_gpr in modify_trace_bit intel\n");
		return false;
	}
	return true;
}
/*
 * gsmsdp_negotiate_local_sdp_direction
 *
 * Description:
 *
 * Given an offer SDP, return the corresponding answer SDP direction.
 *
 * local hold  remote direction    support direction   new local direction
 * enabled     inactive            any                 inactive
 * enabled     sendrecv            sendonly            sendonly
 * enabled     sendrecv            recvonly            inactive
 * enabled     sendrecv            sendrecv            sendonly
 * enabled     sendrecv            inactive            inactive
 * enabled     sendonly            any                 inactive
 * enabled     recvonly            sendrecv            sendonly
 * enabled     recvonly            sendonly            sendonly
 * enabled     recvonly            recvonly            inactive
 * enabled     recvonly            inactive            inactive
 * disabled    inactive            any                 inactive
 * disabled    sendrecv            sendrecv            sendrecv
 * disabled    sendrecv            sendonly            sendonly
 * disabled    sendrecv            recvonly            recvonly
 * disabled    sendrecv            inactive            inactive
 * disabled    sendonly            sendrecv            recvonly
 * disabled    sendonly            sendonly            inactive
 * disabled    sendonly            recvonly            recvonly
 * disabled    sendonly            inactive            inactive
 * disabled    recvonly            sendrecv            sendonly
 * disabled    recvonly            sendonly            sendonly
 * disabled    recvonly            recvonly            inactive
 * disabled    recvonly            inactive            inactive
 *
 * Parameters:
 *
 * dcb_p - pointer to the fsmdef_dcb_t.
 * media - pointer to the fsmdef_media_t for the current media entry.
 * local_hold - Boolean indicating if local hold feature is enabled
 */
static sdp_direction_e
gsmsdp_negotiate_local_sdp_direction (fsmdef_dcb_t *dcb_p,
                                      fsmdef_media_t *media,
                                      boolean local_hold)
{
    sdp_direction_e direction = SDP_DIRECTION_SENDRECV;
    sdp_direction_e remote_direction = gsmsdp_get_remote_sdp_direction(dcb_p,
                                           media->level, &media->dest_addr);

    switch (remote_direction) {
    case SDP_DIRECTION_SENDRECV:
        if (!local_hold) {
            direction = media->support_direction;
        } else if ((media->support_direction == SDP_DIRECTION_SENDRECV) ||
                   (media->support_direction == SDP_DIRECTION_SENDONLY)) {
            direction = SDP_DIRECTION_SENDONLY;
        } else {
            direction = SDP_DIRECTION_INACTIVE;
        }
        break;

    case SDP_DIRECTION_SENDONLY:
        if (local_hold) {
            direction = SDP_DIRECTION_INACTIVE;
        } else if ((media->support_direction == SDP_DIRECTION_SENDRECV) ||
                   (media->support_direction == SDP_DIRECTION_RECVONLY)) {
            direction = SDP_DIRECTION_RECVONLY;
        } else {
            direction = SDP_DIRECTION_INACTIVE;
        }
        break;

    case SDP_DIRECTION_RECVONLY:
        /* Hold state does not matter here; see the table above. */
        if ((media->support_direction == SDP_DIRECTION_SENDRECV) ||
            (media->support_direction == SDP_DIRECTION_SENDONLY)) {
            direction = SDP_DIRECTION_SENDONLY;
        } else {
            direction = SDP_DIRECTION_INACTIVE;
        }
        break;

    case SDP_DIRECTION_INACTIVE:
        direction = SDP_DIRECTION_INACTIVE;
        break;

    default:
        /* Unknown remote direction: keep the sendrecv default. */
        break;
    }
    return direction;
}
/* Compare the declaration (DECL) of struct-like types based on the sloc of
   their last field (if LAST is true), so that more nested types collate before
   less nested ones.
   If ORIG_TYPE is true, also consider struct with a DECL_ORIGINAL_TYPE.  */
static location_t
decl_sloc_common (const_tree decl, bool last, bool orig_type)
{
  tree type = TREE_TYPE (decl);

  /* For a TYPE_DECL of a record/union that has fields, report the
     location of its first (or last, per LAST) field rather than the
     location of the declaration itself.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && (orig_type || !DECL_ORIGINAL_TYPE (decl))
      && RECORD_OR_UNION_TYPE_P (type)
      && TYPE_FIELDS (type))
    {
      tree f = TYPE_FIELDS (type);

      /* Walk to the final field in the chain.  */
      if (last)
	while (TREE_CHAIN (f))
	  f = TREE_CHAIN (f);

      return DECL_SOURCE_LOCATION (f);
    }
  else
    return DECL_SOURCE_LOCATION (decl);
}
#ifndef HCALRECHITANALYZER_H
#define HCALRECHITANALYZER_H
// author: Bobby Scurlock (The University of Florida)
// date: 8/24/2006
// modification: Mike Schmitt
// date: 02.28.2007
// note: code rewrite
#include "DQMServices/Core/interface/DQMStore.h"
#include "FWCore/Framework/interface/Frameworkfwd.h"
#include "FWCore/Utilities/interface/InputTag.h"
#include "DQMServices/Core/interface/DQMEDAnalyzer.h"
#include "DataFormats/HcalRecHit/interface/HBHERecHit.h"
#include "DataFormats/HcalRecHit/interface/HFRecHit.h"
#include "DataFormats/HcalRecHit/interface/HORecHit.h"
#include "DataFormats/HcalRecHit/interface/HcalRecHitCollections.h"
#include <string>
#include <map>
class CaloGeometry;
class CaloGeometryRecord;
// DQM analyzer that books and fills HCAL rec-hit histograms
// (geometry maps, per-depth energy/occupancy maps, and MET/SET
// profiles versus ieta) from the HBHE, HF and HO collections.
class HCALRecHitAnalyzer : public DQMEDAnalyzer {
public:
  explicit HCALRecHitAnalyzer(const edm::ParameterSet&);

  // Fills the histograms for one event.
  void analyze(const edm::Event&, const edm::EventSetup&) override;
  // virtual void beginJob(void);
  // virtual void beginRun(const edm::Run&, const edm::EventSetup&);
  void bookHistograms(DQMStore::IBooker&, edm::Run const&, edm::EventSetup const&) override;
  void dqmBeginRun(const edm::Run&, const edm::EventSetup&) override;

private:
  // Inputs from Configuration
  edm::EDGetTokenT<HBHERecHitCollection> hBHERecHitsLabel_;
  edm::EDGetTokenT<HFRecHitCollection> hFRecHitsLabel_;
  edm::EDGetTokenT<HORecHitCollection> hORecHitsLabel_;
  edm::ESGetToken<CaloGeometry, CaloGeometryRecord> caloGeomToken_;
  bool debug_;
  bool finebinning_;
  std::string FolderName_;

  // Helper Functions
  void FillGeometry(const edm::EventSetup&);

  int Nevents;  // event counter, mirrored in hHCAL_Nevents

  //histos
  // Geometry maps (filled once from the CaloGeometry).
  MonitorElement* hHCAL_ieta_iphi_HBMap;
  MonitorElement* hHCAL_ieta_iphi_HEMap;
  MonitorElement* hHCAL_ieta_iphi_HFMap;
  MonitorElement* hHCAL_ieta_iphi_HOMap;
  MonitorElement* hHCAL_ieta_iphi_etaMap;
  MonitorElement* hHCAL_ieta_iphi_phiMap;
  MonitorElement* hHCAL_ieta_detaMap;
  MonitorElement* hHCAL_ieta_dphiMap;

  MonitorElement* hHCAL_Nevents;

  // Per-depth (D1..D4 -- presumably HCAL depth segments; verify against
  // the booking code) energy and occupancy maps in (ieta, iphi).
  MonitorElement* hHCAL_D1_energy_ieta_iphi;
  MonitorElement* hHCAL_D2_energy_ieta_iphi;
  MonitorElement* hHCAL_D3_energy_ieta_iphi;
  MonitorElement* hHCAL_D4_energy_ieta_iphi;

  MonitorElement* hHCAL_D1_Minenergy_ieta_iphi;
  MonitorElement* hHCAL_D2_Minenergy_ieta_iphi;
  MonitorElement* hHCAL_D3_Minenergy_ieta_iphi;
  MonitorElement* hHCAL_D4_Minenergy_ieta_iphi;

  MonitorElement* hHCAL_D1_Maxenergy_ieta_iphi;
  MonitorElement* hHCAL_D2_Maxenergy_ieta_iphi;
  MonitorElement* hHCAL_D3_Maxenergy_ieta_iphi;
  MonitorElement* hHCAL_D4_Maxenergy_ieta_iphi;

  MonitorElement* hHCAL_D1_Occ_ieta_iphi;
  MonitorElement* hHCAL_D2_Occ_ieta_iphi;
  MonitorElement* hHCAL_D3_Occ_ieta_iphi;
  MonitorElement* hHCAL_D4_Occ_ieta_iphi;

  // Per-depth profiles versus ieta.
  MonitorElement* hHCAL_D1_energyvsieta;
  MonitorElement* hHCAL_D2_energyvsieta;
  MonitorElement* hHCAL_D3_energyvsieta;
  MonitorElement* hHCAL_D4_energyvsieta;

  MonitorElement* hHCAL_D1_Minenergyvsieta;
  MonitorElement* hHCAL_D2_Minenergyvsieta;
  MonitorElement* hHCAL_D3_Minenergyvsieta;
  MonitorElement* hHCAL_D4_Minenergyvsieta;

  MonitorElement* hHCAL_D1_Maxenergyvsieta;
  MonitorElement* hHCAL_D2_Maxenergyvsieta;
  MonitorElement* hHCAL_D3_Maxenergyvsieta;
  MonitorElement* hHCAL_D4_Maxenergyvsieta;

  MonitorElement* hHCAL_D1_Occvsieta;
  MonitorElement* hHCAL_D2_Occvsieta;
  MonitorElement* hHCAL_D3_Occvsieta;
  MonitorElement* hHCAL_D4_Occvsieta;

  // Per-depth SET/MET quantities versus ieta.
  MonitorElement* hHCAL_D1_SETvsieta;
  MonitorElement* hHCAL_D2_SETvsieta;
  MonitorElement* hHCAL_D3_SETvsieta;
  MonitorElement* hHCAL_D4_SETvsieta;

  MonitorElement* hHCAL_D1_METvsieta;
  MonitorElement* hHCAL_D2_METvsieta;
  MonitorElement* hHCAL_D3_METvsieta;
  MonitorElement* hHCAL_D4_METvsieta;

  MonitorElement* hHCAL_D1_METPhivsieta;
  MonitorElement* hHCAL_D2_METPhivsieta;
  MonitorElement* hHCAL_D3_METPhivsieta;
  MonitorElement* hHCAL_D4_METPhivsieta;

  MonitorElement* hHCAL_D1_MExvsieta;
  MonitorElement* hHCAL_D2_MExvsieta;
  MonitorElement* hHCAL_D3_MExvsieta;
  MonitorElement* hHCAL_D4_MExvsieta;

  MonitorElement* hHCAL_D1_MEyvsieta;
  MonitorElement* hHCAL_D2_MEyvsieta;
  MonitorElement* hHCAL_D3_MEyvsieta;
  MonitorElement* hHCAL_D4_MEyvsieta;
};
#endif
|
/*Performs a forward 8x8 Type-II DCT transform on blocks which overlap the
   border of the picture region.
  This method ONLY works with rectangular regions.
  _border: A description of which pixels are inside the border.
  _y:      The buffer to store the result in.
           This may be the same as _x.
  _x:      The input pixel values.
           Pixel values outside the border will be ignored.*/
void oc_fdct8x8_border(const oc_border_info *_border,
 ogg_int16_t _y[64],const ogg_int16_t _x[64]){
  ogg_int16_t *in;
  ogg_int16_t *out;
  ogg_int16_t  w[64];
  ogg_int64_t  mask;
  const oc_extension_info *cext;
  const oc_extension_info *rext;
  int cmask;
  int rmask;
  int ri;
  int ci;
  /*Derive occupancy masks from the 64-bit pixel mask: cmask gets one bit
     per row that contains any valid pixel; rmask is the union of valid
     columns over all rows.*/
  rmask=cmask=0;
  mask=_border->mask;
  for(ri=0;ri<8;ri++){
    cmask|=((mask&0xFF)!=0)<<ri;
    rmask|=mask&0xFF;
    mask>>=8;
  }
  /*Look up extension info for the partially covered axis; if the shape is
     not one of the supported patterns, fall back to the full-block fDCT.*/
  if(cmask==0xFF)cext=NULL;
  else for(cext=OC_EXTENSION_INFO;cext->mask!=cmask;){
    if(++cext>=OC_EXTENSION_INFO+OC_NSHAPES){
      oc_enc_fdct8x8_c(_y,_x);
      return;
    }
  }
  if(rmask==0xFF)rext=NULL;
  else for(rext=OC_EXTENSION_INFO;rext->mask!=rmask;){
    if(++rext>=OC_EXTENSION_INFO+OC_NSHAPES){
      oc_enc_fdct8x8_c(_y,_x);
      return;
    }
  }
  /*Scale up by 2 bits and apply bias terms.
    NOTE(review): presumably the same biasing as the reference
     oc_enc_fdct8x8_c -- confirm against that implementation.*/
  for(ci=0;ci<64;ci++)w[ci]=_x[ci]<<2;
  w[0]+=(w[0]!=0)+1;
  w[1]++;
  w[8]--;
  /*First 1-D pass, using the extended transform where the axis is only
     partially covered.*/
  in=w;
  out=_y;
  if(cext==NULL)for(ci=0;ci<8;ci++)oc_fdct8(out+(ci<<3),in+ci);
  else for(ci=0;ci<8;ci++)if(rmask&(1<<ci))oc_fdct8_ext(out+(ci<<3),in+ci,cext);
  /*Second 1-D pass over the other axis.*/
  in=_y;
  out=w;
  if(rext==NULL)for(ri=0;ri<8;ri++)oc_fdct8(out+(ri<<3),in+ri);
  else for(ri=0;ri<8;ri++)oc_fdct8_ext(out+(ri<<3),in+ri,rext);
  /*Round and scale back down; note '+' binds tighter than '>>', so this
     computes (w[ci]+2)>>2.*/
  for(ci=0;ci<64;ci++)_y[ci]=w[ci]+2>>2;
}
/*
==================
DeathmatchScoreboard

Draw instead of help message.
Note that it isn't that hard to overflow the 1400 byte message limit!
==================
*/
void DeathmatchScoreboard (edict_t *ent)
{
	/* Build the scoreboard layout message for this client (with the
	 * client's current enemy highlighted), then send it reliably
	 * (second argument true) to just this client. */
	DeathmatchScoreboardMessage (ent, ent->enemy);
	gi.unicast (ent, true);
}
/**
 * Download git repository of `package` locally.
 *
 * Returns 0 if the repo is already downloaded, the exit code of git otherwise.
 */
int download_package(struct Package *package)
{
	/* A directory named after the package means it was already cloned. */
	if (access(package->name, F_OK) == 0)
		return 0;

	/* Build the clone URL: <AUR_LINK>/<name>.git */
	char link[PATH_MAX];
	snprintf(link, sizeof(link), "%s/%s.git", AUR_LINK, package->name);

	char *git_clone_args[] = {"git", "clone", "--depth=1", "--quiet", link, NULL};

	printf("==> Downloading package '%s'\n", package->name);
	/* NOTE(review): fork_program presumably blocks until git exits and
	 * returns its exit status -- confirm in fork_program's definition. */
	return fork_program("git", git_clone_args);
}
// See if we can recover from tunnel creation issues.
// Returns true when a retry has been scheduled on tunnelEventControl,
// false when the tunnel is given up on (state set to CLOSED_TUNNEL).
static bool handleRequestTunnelFailure(uint8_t tunnelIndex, EmberAfPluginTunnelingClientStatus status)
{
  uint8_t i;
  emberAfDebugPrintln("CHF: handleRequestTunnelFailure 0x%x, 0x%x",
                      tunnelIndex, status);
  if (status == EMBER_AF_PLUGIN_TUNNELING_CLIENT_BUSY) {
    // Server is busy: back off and retry this request in 180 seconds.
    tunnels[tunnelIndex].state = REQUEST_PENDING_TUNNEL;
    tunnels[tunnelIndex].timeoutMSec = halCommonGetInt32uMillisecondTick()
                                       + (MILLISECOND_TICKS_PER_SECOND * 180);
    slxu_zigbee_event_set_active(tunnelEventControl);
    emberAfPluginCommsHubFunctionPrintln("CHF: Busy status received from node ID 0x%2x", tunnels[tunnelIndex].remoteNodeId);
    return true;
  } else if (status == EMBER_AF_PLUGIN_TUNNELING_CLIENT_NO_MORE_TUNNEL_IDS) {
    // Out of tunnel ids: destroy any other tunnels to the same node to
    // free ids, then retry shortly if anything was reclaimed.
    bool retryRequest = false;
    for (i = 0; i < EMBER_AF_PLUGIN_COMMS_HUB_FUNCTION_TUNNEL_LIMIT; i++) {
      if (i != tunnelIndex && tunnels[i].remoteNodeId == tunnels[tunnelIndex].remoteNodeId) {
        retryRequest = true;
        emAfPluginCommsHubFunctionTunnelDestroy(tunnels[i].remoteDeviceId);
      }
    }
    if (retryRequest) {
      // Retry in 5 seconds now that ids have been released.
      tunnels[tunnelIndex].state = REQUEST_PENDING_TUNNEL;
      tunnels[tunnelIndex].timeoutMSec = halCommonGetInt32uMillisecondTick()
                                         + (MILLISECOND_TICKS_PER_SECOND * 5);
      slxu_zigbee_event_set_active(tunnelEventControl);
      return true;
    }
    // Nothing to reclaim: give up on this tunnel.
    emberAfPluginCommsHubFunctionPrintln("%p%p%p",
                                         "Error: ",
                                         "Tunnel Create failed: ",
                                         "No more tunnel ids");
    tunnels[tunnelIndex].state = CLOSED_TUNNEL;
    return false;
  }
  // Any other status is unrecoverable.
  emberAfPluginCommsHubFunctionPrintln("%p%p%p0x%x",
                                       "Error: ",
                                       "Tunnel Create failed: ",
                                       "Tunneling Client Status: ",
                                       status);
  tunnels[tunnelIndex].state = CLOSED_TUNNEL;
  return false;
}
/*
* SIMD optimized non-power-of-two MDCT functions
*
* Copyright (C) 2017 Rostislav Pehlivanov <atomnuker@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/mdct15.h"
void ff_mdct15_postreindex_sse3(FFTComplex *out, FFTComplex *in, FFTComplex *exp, int *lut, ptrdiff_t len8);
void ff_mdct15_postreindex_avx2(FFTComplex *out, FFTComplex *in, FFTComplex *exp, int *lut, ptrdiff_t len8);
void ff_fft15_avx(FFTComplex *out, FFTComplex *in, FFTComplex *exptab, ptrdiff_t stride);
/* Repacks s->exptab in place from the layout produced by the generic mdct15
 * init into the interleaved/splatted layout consumed by the AVX fft15
 * assembly (ff_fft15_avx). Called from ff_mdct15_init_x86() only when that
 * assembly version has been selected.
 */
static void perm_twiddles(MDCT15Context *s)
{
    int k;
    FFTComplex tmp[30];
    /* 5-point FFT twiddles: splat the re/im parts of exptab[19..20] into
     * (x, x) pairs at the tail of the table (indices 60..63) */
    s->exptab[60].re = s->exptab[60].im = s->exptab[19].re;
    s->exptab[61].re = s->exptab[61].im = s->exptab[19].im;
    s->exptab[62].re = s->exptab[62].im = s->exptab[20].re;
    s->exptab[63].re = s->exptab[63].im = s->exptab[20].im;
    /* 15-point FFT twiddles: gather 6 entries per k into tmp.
     * NOTE(review): slot 5 uses index 2*k + 5 where the pattern of slots
     * 1 and 3 would suggest 2*(k + 10); the two agree only if exptab is
     * 15-periodic at these indices — confirm against the generic init. */
    for (k = 0; k < 5; k++) {
        tmp[6*k + 0] = s->exptab[k + 0];
        tmp[6*k + 2] = s->exptab[k + 5];
        tmp[6*k + 4] = s->exptab[k + 10];
        tmp[6*k + 1] = s->exptab[2 * (k + 0)];
        tmp[6*k + 3] = s->exptab[2 * (k + 5)];
        tmp[6*k + 5] = s->exptab[2 * k + 5 ];
    }
    /* AC lanes: for each column k, broadcast the real parts of rows 1..4
     * and pair each imaginary part with its negation */
    for (k = 0; k < 6; k++) {
        FFTComplex ac_exp[] = {
            { tmp[6*1 + k].re, tmp[6*1 + k].re },
            { tmp[6*2 + k].re, tmp[6*2 + k].re },
            { tmp[6*3 + k].re, tmp[6*3 + k].re },
            { tmp[6*4 + k].re, tmp[6*4 + k].re },
            { tmp[6*1 + k].im, -tmp[6*1 + k].im },
            { tmp[6*2 + k].im, -tmp[6*2 + k].im },
            { tmp[6*3 + k].im, -tmp[6*3 + k].im },
            { tmp[6*4 + k].im, -tmp[6*4 + k].im },
        };
        memcpy(s->exptab + 8*k, ac_exp, 8*sizeof(FFTComplex));
    }
    /* Specialcase when k = 0: row 0 is stored as (re, -im)/(im, re)
     * rotation pairs after the AC block (offset 48) */
    for (k = 0; k < 3; k++) {
        FFTComplex dc_exp[] = {
            { tmp[2*k + 0].re, -tmp[2*k + 0].im },
            { tmp[2*k + 0].im, tmp[2*k + 0].re },
            { tmp[2*k + 1].re, -tmp[2*k + 1].im },
            { tmp[2*k + 1].im, tmp[2*k + 1].re },
        };
        memcpy(s->exptab + 8*6 + 4*k, dc_exp, 4*sizeof(FFTComplex));
    }
}
/* Selects the best available x86 implementations for the mdct15 context.
 * AVX2 postreindex overrides the SSE3 one when both are usable; picking the
 * AVX fft15 requires repacking the twiddles via perm_twiddles().
 */
av_cold void ff_mdct15_init_x86(MDCT15Context *s)
{
    int cpu_flags    = av_get_cpu_flags();
    int have_avx_fft = ARCH_X86_64 && EXTERNAL_AVX(cpu_flags);

    if (EXTERNAL_SSE3(cpu_flags))
        s->postreindex = ff_mdct15_postreindex_sse3;

    if (ARCH_X86_64 && EXTERNAL_AVX2_FAST(cpu_flags))
        s->postreindex = ff_mdct15_postreindex_avx2;

    if (have_avx_fft) {
        s->fft15 = ff_fft15_avx;
        perm_twiddles(s);
    }
}
|
/* practice with Dukkha */
#include <stdio.h>
#define N 100000
int tt[N * 2], n;
/* ORs the bits of q into every leaf of [l, r] (0-based, inclusive) using a
 * bottom-up iterative segment-tree range update; marks are pushed down to
 * the leaves later by flush().
 */
void update(int l, int r, int q) {
	l += n;
	r += n;
	while (l <= r) {
		if (l & 1)
			tt[l++] |= q;
		if (!(r & 1))
			tt[r--] |= q;
		l >>= 1;
		r >>= 1;
	}
}
/* Propagates the pending OR-marks from internal nodes down to the leaves,
 * then rebuilds every internal node as the AND of its two children so that
 * query() returns correct range-AND values.
 */
void flush() {
	int i;
	for (i = 1; i < n; i++) {
		tt[2 * i] |= tt[i];
		tt[2 * i + 1] |= tt[i];
	}
	for (i = n - 1; i > 0; i--)
		tt[i] = tt[2 * i] & tt[2 * i + 1];
}
/* Returns the bitwise AND over leaves [l, r] (0-based, inclusive); valid
 * only after flush() has rebuilt the internal nodes.
 */
int query(int l, int r) {
	int res = 0x7fffffff;
	l += n;
	r += n;
	while (l <= r) {
		if (l & 1)
			res &= tt[l++];
		if (!(r & 1))
			res &= tt[r--];
		l >>= 1;
		r >>= 1;
	}
	return res;
}
/* Reads n and m constraints (l, r, q) meaning "AND of a[l..r] must equal q",
 * builds a candidate array by OR-ing each q over its range, then verifies
 * every constraint; prints NO, or YES followed by the array.
 */
int main() {
	static int lo[N], hi[N], val[N];
	int m, h, i;

	scanf("%d%d", &n, &m);
	for (h = 0; h < m; h++) {
		scanf("%d%d%d", &lo[h], &hi[h], &val[h]);
		lo[h]--;
		hi[h]--;
		update(lo[h], hi[h], val[h]);
	}
	flush();
	for (h = 0; h < m; h++) {
		if (query(lo[h], hi[h]) != val[h]) {
			printf("NO\n");
			return 0;
		}
	}
	printf("YES\n");
	for (i = n; i < n + n; i++)
		printf("%d ", tt[i]);
	printf("\n");
	return 0;
}
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
*
* This module is not a complete tagger implementation. It only provides
* primitives for taggers that rely on 802.1Q VLAN tags to use. The
* dsa_8021q_netdev_ops is registered for API compliance and not used
* directly by callers.
*/
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/dsa/8021q.h>
#include "dsa_priv.h"
/* Binary structure of the fake 12-bit VID field (when the TPID is
* ETH_P_DSA_8021Q):
*
* | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
* +-----------+-----+-----------------+-----------+-----------------------+
* | DIR | SVL | SWITCH_ID | SUBVLAN | PORT |
* +-----------+-----+-----------------+-----------+-----------------------+
*
* DIR - VID[11:10]:
* Direction flags.
* * 1 (0b01) for RX VLAN,
* * 2 (0b10) for TX VLAN.
* These values make the special VIDs of 0, 1 and 4095 to be left
* unused by this coding scheme.
*
* SVL/SUBVLAN - { VID[9], VID[5:4] }:
* Sub-VLAN encoding. Valid only when DIR indicates an RX VLAN.
* * 0 (0b000): Field does not encode a sub-VLAN, either because
* received traffic is untagged, PVID-tagged or because a second
* VLAN tag is present after this tag and not inside of it.
* * 1 (0b001): Received traffic is tagged with a VID value private
* to the host. This field encodes the index in the host's lookup
* table through which the value of the ingress VLAN ID can be
* recovered.
* * 2 (0b010): Field encodes a sub-VLAN.
* ...
* * 7 (0b111): Field encodes a sub-VLAN.
* When DIR indicates a TX VLAN, SUBVLAN must be transmitted as zero
* (by the host) and ignored on receive (by the switch).
*
* SWITCH_ID - VID[8:6]:
* Index of switch within DSA tree. Must be between 0 and 7.
*
* PORT - VID[3:0]:
* Index of switch port. Must be between 0 and 15.
*/
#define DSA_8021Q_DIR_SHIFT 10
#define DSA_8021Q_DIR_MASK GENMASK(11, 10)
#define DSA_8021Q_DIR(x) (((x) << DSA_8021Q_DIR_SHIFT) & \
DSA_8021Q_DIR_MASK)
#define DSA_8021Q_DIR_RX DSA_8021Q_DIR(1)
#define DSA_8021Q_DIR_TX DSA_8021Q_DIR(2)
#define DSA_8021Q_SWITCH_ID_SHIFT 6
#define DSA_8021Q_SWITCH_ID_MASK GENMASK(8, 6)
#define DSA_8021Q_SWITCH_ID(x) (((x) << DSA_8021Q_SWITCH_ID_SHIFT) & \
DSA_8021Q_SWITCH_ID_MASK)
#define DSA_8021Q_SUBVLAN_HI_SHIFT 9
#define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
#define DSA_8021Q_SUBVLAN_LO_SHIFT 4
#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
#define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
#define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
#define DSA_8021Q_SUBVLAN(x) \
(((DSA_8021Q_SUBVLAN_LO(x) << DSA_8021Q_SUBVLAN_LO_SHIFT) & \
DSA_8021Q_SUBVLAN_LO_MASK) | \
((DSA_8021Q_SUBVLAN_HI(x) << DSA_8021Q_SUBVLAN_HI_SHIFT) & \
DSA_8021Q_SUBVLAN_HI_MASK))
#define DSA_8021Q_PORT_SHIFT 0
#define DSA_8021Q_PORT_MASK GENMASK(3, 0)
#define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \
DSA_8021Q_PORT_MASK)
/* Encode the VLAN ID inserted on xmit so the switch can steer the frame on
 * egress: the TX direction bit plus the switch and port indices.
 */
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
{
	u16 vid = DSA_8021Q_DIR_TX;

	vid |= DSA_8021Q_SWITCH_ID(ds->index);
	vid |= DSA_8021Q_PORT(port);

	return vid;
}
EXPORT_SYMBOL_GPL(dsa_8021q_tx_vid);
/* Encode the VLAN ID installed as pvid on this switch port; it egresses
 * tagged towards the CPU port and is decoded by the rcv function.
 */
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
{
	u16 vid = DSA_8021Q_DIR_RX;

	vid |= DSA_8021Q_SWITCH_ID(ds->index);
	vid |= DSA_8021Q_PORT(port);

	return vid;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid);
/* Same as dsa_8021q_rx_vid(), with the given sub-VLAN index folded in. */
u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan)
{
	u16 vid = dsa_8021q_rx_vid(ds, port);

	return vid | DSA_8021Q_SUBVLAN(subvlan);
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_vid_subvlan);
/* Returns the decoded switch ID from the RX VID. */
int dsa_8021q_rx_switch_id(u16 vid)
{
	u16 field = vid & DSA_8021Q_SWITCH_ID_MASK;

	return field >> DSA_8021Q_SWITCH_ID_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_switch_id);
/* Returns the decoded port ID from the RX VID. */
int dsa_8021q_rx_source_port(u16 vid)
{
	u16 field = vid & DSA_8021Q_PORT_MASK;

	return field >> DSA_8021Q_PORT_SHIFT;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_source_port);
/* Returns the decoded subvlan from the RX VID (high bit from VID[9],
 * low two bits from VID[5:4], per the field layout documented above).
 */
u16 dsa_8021q_rx_subvlan(u16 vid)
{
	u16 hi = (vid & DSA_8021Q_SUBVLAN_HI_MASK) >> DSA_8021Q_SUBVLAN_HI_SHIFT;
	u16 lo = (vid & DSA_8021Q_SUBVLAN_LO_MASK) >> DSA_8021Q_SUBVLAN_LO_SHIFT;

	return (hi << 2) | lo;
}
EXPORT_SYMBOL_GPL(dsa_8021q_rx_subvlan);
/* True if the VID's direction field says it is an RX VLAN. */
bool vid_is_dsa_8021q_rxvlan(u16 vid)
{
	u16 dir = vid & DSA_8021Q_DIR_MASK;

	return dir == DSA_8021Q_DIR_RX;
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_rxvlan);
/* True if the VID's direction field says it is a TX VLAN. */
bool vid_is_dsa_8021q_txvlan(u16 vid)
{
	u16 dir = vid & DSA_8021Q_DIR_MASK;

	return dir == DSA_8021Q_DIR_TX;
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q_txvlan);
/* True if the VID belongs to the dsa_8021q coding scheme (RX or TX). */
bool vid_is_dsa_8021q(u16 vid)
{
	if (vid_is_dsa_8021q_rxvlan(vid))
		return true;

	return vid_is_dsa_8021q_txvlan(vid);
}
EXPORT_SYMBOL_GPL(vid_is_dsa_8021q);
/* Programs @vid into the HW filter of switch port @port.
 * When @enabled is true the VLAN is added with @flags; when false it is
 * deleted (@flags is irrelevant for deletion). If the user had explicitly
 * configured this @vid through the bridge core, the bridge layer installs
 * it again afterwards with its own flags.
 */
static int dsa_8021q_vid_apply(struct dsa_8021q_context *ctx, int port, u16 vid,
			       u16 flags, bool enabled)
{
	struct dsa_port *dp = dsa_to_port(ctx->ds, port);

	if (!enabled)
		return ctx->ops->vlan_del(ctx->ds, dp->index, vid);

	return ctx->ops->vlan_add(ctx->ds, dp->index, vid, flags);
}
/* RX VLAN tagging (left) and TX VLAN tagging (right) setup shown for a single
* front-panel switch port (here swp0).
*
* Port identification through VLAN (802.1Q) tags has different requirements
* for it to work effectively:
* - On RX (ingress from network): each front-panel port must have a pvid
* that uniquely identifies it, and the egress of this pvid must be tagged
* towards the CPU port, so that software can recover the source port based
* on the VID in the frame. But this would only work for standalone ports;
* if bridged, this VLAN setup would break autonomous forwarding and would
* force all switched traffic to pass through the CPU. So we must also make
* the other front-panel ports members of this VID we're adding, albeit
* we're not making it their PVID (they'll still have their own).
* By the way - just because we're installing the same VID in multiple
* switch ports doesn't mean that they'll start to talk to one another, even
* while not bridged: the final forwarding decision is still an AND between
* the L2 forwarding information (which is limiting forwarding in this case)
* and the VLAN-based restrictions (of which there are none in this case,
* since all ports are members).
* - On TX (ingress from CPU and towards network) we are faced with a problem.
* If we were to tag traffic (from within DSA) with the port's pvid, all
* would be well, assuming the switch ports were standalone. Frames would
* have no choice but to be directed towards the correct front-panel port.
* But because we also want the RX VLAN to not break bridging, then
* inevitably that means that we have to give them a choice (of what
* front-panel port to go out on), and therefore we cannot steer traffic
* based on the RX VID. So what we do is simply install one more VID on the
* front-panel and CPU ports, and profit off of the fact that steering will
* work just by virtue of the fact that there is only one other port that's
* a member of the VID we're tagging the traffic with - the desired one.
*
* So at the end, each front-panel port will have one RX VID (also the PVID),
* the RX VID of all other front-panel ports, and one TX VID. Whereas the CPU
* port will have the RX and TX VIDs of all front-panel ports, and on top of
* that, is also tagged-input and tagged-output (VLAN trunk).
*
* CPU port CPU port
* +-------------+-----+-------------+ +-------------+-----+-------------+
* | RX VID | | | | TX VID | | |
* | of swp0 | | | | of swp0 | | |
* | +-----+ | | +-----+ |
* | ^ T | | | Tagged |
* | | | | | ingress |
* | +-------+---+---+-------+ | | +-----------+ |
* | | | | | | | | Untagged |
* | | U v U v U v | | v egress |
* | +-----+ +-----+ +-----+ +-----+ | | +-----+ +-----+ +-----+ +-----+ |
* | | | | | | | | | | | | | | | | | | | |
* | |PVID | | | | | | | | | | | | | | | | | |
* +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+
* swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3
*/
/* Installs (@enabled true) or removes (@enabled false) the dsa_8021q VLANs
 * for one front-panel port, per the diagram above: this port's RX VID is
 * made pvid here, a plain member VLAN on all other user ports, tagged
 * egress on the CPU port, and its whole sub-VLAN range is added to the
 * master's RX filter; the TX VID goes untagged here and on the CPU port.
 * Returns 0 on success or the first underlying error.
 */
static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
				bool enabled)
{
	int upstream = dsa_upstream_port(ctx->ds, port);
	u16 rx_vid = dsa_8021q_rx_vid(ctx->ds, port);
	u16 tx_vid = dsa_8021q_tx_vid(ctx->ds, port);
	struct net_device *master;
	int i, err, subvlan;
	/* The CPU port is implicitly configured by
	 * configuring the front-panel ports
	 */
	if (!dsa_is_user_port(ctx->ds, port))
		return 0;
	master = dsa_to_port(ctx->ds, port)->cpu_dp->master;
	/* Add this user port's RX VID to the membership list of all others
	 * (including itself). This is so that bridging will not be hindered.
	 * L2 forwarding rules still take precedence when there are no VLAN
	 * restrictions, so there are no concerns about leaking traffic.
	 */
	for (i = 0; i < ctx->ds->num_ports; i++) {
		u16 flags;
		if (i == upstream)
			continue;
		else if (i == port)
			/* The RX VID is pvid on this port */
			flags = BRIDGE_VLAN_INFO_UNTAGGED |
				BRIDGE_VLAN_INFO_PVID;
		else
			/* The RX VID is a regular VLAN on all others */
			flags = BRIDGE_VLAN_INFO_UNTAGGED;
		err = dsa_8021q_vid_apply(ctx, i, rx_vid, flags, enabled);
		if (err) {
			dev_err(ctx->ds->dev,
				"Failed to apply RX VID %d to port %d: %d\n",
				rx_vid, port, err);
			return err;
		}
	}
	/* CPU port needs to see this port's RX VID
	 * as tagged egress.
	 */
	err = dsa_8021q_vid_apply(ctx, upstream, rx_vid, 0, enabled);
	if (err) {
		dev_err(ctx->ds->dev,
			"Failed to apply RX VID %d to port %d: %d\n",
			rx_vid, port, err);
		return err;
	}
	/* Add to the master's RX filter not only @rx_vid, but in fact
	 * the entire subvlan range, just in case this DSA switch might
	 * want to use sub-VLANs.
	 * NOTE(review): vlan_vid_add() can fail but its return value is
	 * ignored here — presumably best-effort; confirm.
	 */
	for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) {
		u16 vid = dsa_8021q_rx_vid_subvlan(ctx->ds, port, subvlan);
		if (enabled)
			vlan_vid_add(master, ctx->proto, vid);
		else
			vlan_vid_del(master, ctx->proto, vid);
	}
	/* Finally apply the TX VID on this port and on the CPU port */
	err = dsa_8021q_vid_apply(ctx, port, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED,
				  enabled);
	if (err) {
		dev_err(ctx->ds->dev,
			"Failed to apply TX VID %d on port %d: %d\n",
			tx_vid, port, err);
		return err;
	}
	err = dsa_8021q_vid_apply(ctx, upstream, tx_vid, 0, enabled);
	if (err) {
		dev_err(ctx->ds->dev,
			"Failed to apply TX VID %d on port %d: %d\n",
			tx_vid, upstream, err);
		return err;
	}
	/* err is 0 here: the last call above succeeded */
	return err;
}
/* Applies (or tears down, per @enabled) the dsa_8021q VLAN setup on every
 * port of the switch. Must be called under rtnl. Returns 0 or the first
 * per-port error.
 */
int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
{
	int err;
	int port;

	ASSERT_RTNL();

	for (port = 0; port < ctx->ds->num_ports; port++) {
		err = dsa_8021q_setup_port(ctx, port, enabled);
		if (err < 0) {
			dev_err(ctx->ds->dev,
				"Failed to setup VLAN tagging for port %d: %d\n",
				port, err);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_8021q_setup);
/* Installs (or removes, per @enabled) the RX VID of local port @port,
 * egress-untagged, on @other_port of @other_ctx so the remote switch can
 * receive our traffic.
 */
static int dsa_8021q_crosschip_link_apply(struct dsa_8021q_context *ctx,
					  int port,
					  struct dsa_8021q_context *other_ctx,
					  int other_port, bool enabled)
{
	u16 vid = dsa_8021q_rx_vid(ctx->ds, port);

	return dsa_8021q_vid_apply(other_ctx, other_port, vid,
				   BRIDGE_VLAN_INFO_UNTAGGED, enabled);
}
/* Records a crosschip link (port -> other_ctx:other_port) in ctx's list.
 * If the same link is already tracked, only its refcount is bumped.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int dsa_8021q_crosschip_link_add(struct dsa_8021q_context *ctx, int port,
					struct dsa_8021q_context *other_ctx,
					int other_port)
{
	struct dsa_8021q_crosschip_link *link;

	list_for_each_entry(link, &ctx->crosschip_links, list) {
		bool same = link->port == port &&
			    link->other_ctx == other_ctx &&
			    link->other_port == other_port;

		if (!same)
			continue;

		refcount_inc(&link->refcount);
		return 0;
	}

	dev_dbg(ctx->ds->dev,
		"adding crosschip link from port %d to %s port %d\n",
		port, dev_name(other_ctx->ds->dev), other_port);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	link->port = port;
	link->other_ctx = other_ctx;
	link->other_port = other_port;
	refcount_set(&link->refcount, 1);

	list_add(&link->list, &ctx->crosschip_links);

	return 0;
}
/* Drops one reference on crosschip link @c. Sets *keep to true if other
 * references remain; otherwise unlinks and frees @c (after which @c must
 * not be touched).
 */
static void dsa_8021q_crosschip_link_del(struct dsa_8021q_context *ctx,
					 struct dsa_8021q_crosschip_link *c,
					 bool *keep)
{
	if (!refcount_dec_and_test(&c->refcount)) {
		*keep = true;
		return;
	}
	*keep = false;

	dev_dbg(ctx->ds->dev,
		"deleting crosschip link from port %d to %s port %d\n",
		c->port, dev_name(c->other_ctx->ds->dev), c->other_port);

	list_del(&c->list);
	kfree(c);
}
/* Make traffic from local port @port be received by remote port @other_port.
 * This means that our @rx_vid needs to be installed on @other_ds's upstream
 * and user ports. The user ports should be egress-untagged so that they can
 * pop the dsa_8021q VLAN. But the @other_upstream can be either egress-tagged
 * or untagged: it doesn't matter, since it should never egress a frame having
 * our @rx_vid.
 */
int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
				    struct dsa_8021q_context *other_ctx,
				    int other_port)
{
	/* @other_upstream is how @other_ds reaches us. If we are part
	 * of disjoint trees, then we are probably connected through
	 * our CPU ports. If we're part of the same tree though, we should
	 * probably use dsa_towards_port.
	 */
	int other_upstream = dsa_upstream_port(other_ctx->ds, other_port);
	int rc;
	/* Track and install our RX VID on the remote user port... */
	rc = dsa_8021q_crosschip_link_add(ctx, port, other_ctx, other_port);
	if (rc)
		return rc;
	rc = dsa_8021q_crosschip_link_apply(ctx, port, other_ctx,
					    other_port, true);
	if (rc)
		return rc;
	/* ...and on the remote upstream (CPU-facing) port.
	 * NOTE(review): a failure past the first link leaves that link
	 * tracked/installed with no rollback — confirm callers tolerate
	 * the partial state.
	 */
	rc = dsa_8021q_crosschip_link_add(ctx, port, other_ctx, other_upstream);
	if (rc)
		return rc;
	return dsa_8021q_crosschip_link_apply(ctx, port, other_ctx,
					      other_upstream, true);
}
EXPORT_SYMBOL_GPL(dsa_8021q_crosschip_bridge_join);
/* Undoes dsa_8021q_crosschip_bridge_join(): drops one reference on the
 * crosschip links from local @port towards @other_port and towards the
 * remote upstream, and uninstalls our RX VID from any link whose refcount
 * dropped to zero. Returns 0 or the first VLAN-removal error.
 *
 * Fix: the locals copied out of @c were previously named other_ctx and
 * other_port, shadowing the function parameters (-Wshadow); renamed to
 * link_ctx/link_port. Behavior is unchanged.
 */
int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
				     struct dsa_8021q_context *other_ctx,
				     int other_port)
{
	int other_upstream = dsa_upstream_port(other_ctx->ds, other_port);
	struct dsa_8021q_crosschip_link *c, *n;

	list_for_each_entry_safe(c, n, &ctx->crosschip_links, list) {
		if (c->port == port && c->other_ctx == other_ctx &&
		    (c->other_port == other_port ||
		     c->other_port == other_upstream)) {
			/* Copy out of @c now: link_del may free it below */
			struct dsa_8021q_context *link_ctx = c->other_ctx;
			int link_port = c->other_port;
			bool keep;
			int rc;

			dsa_8021q_crosschip_link_del(ctx, c, &keep);
			if (keep)
				continue;

			rc = dsa_8021q_crosschip_link_apply(ctx, port,
							    link_ctx,
							    link_port,
							    false);
			if (rc)
				return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_8021q_crosschip_bridge_leave);
/* Pushes a VLAN tag with the given TPID and TCI onto @skb on the xmit path.
 * skb->data already points at skb_mac_header, which is what
 * vlan_insert_tag() expects.
 */
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
			       u16 tpid, u16 tci)
{
	__be16 proto = htons(tpid);

	return vlan_insert_tag(skb, proto, tci);
}
EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
MODULE_LICENSE("GPL v2");
|
/**
 * \brief Registers the thread-macro unit tests (mutex, spinlock and rwlock
 *        wrappers) with the unit test runner. Compiled to a no-op unless
 *        UNITTESTS is defined.
 */
void ThreadMacrosRegisterTests(void)
{
#ifdef UNITTESTS
    UtRegisterTest("ThreadMacrosTest01Mutex", ThreadMacrosTest01Mutex);
    UtRegisterTest("ThreadMacrosTest02Spinlocks", ThreadMacrosTest02Spinlocks);
    UtRegisterTest("ThreadMacrosTest03RWLocks", ThreadMacrosTest03RWLocks);
    UtRegisterTest("ThreadMacrosTest04RWLocks", ThreadMacrosTest04RWLocks);
#endif
}
// Reads the next bit from infile into *bit, refilling the static byte
// buffer bitbuff BLOCK bytes at a time. Returns true once the computed
// end-of-input bit position is reached, false otherwise.
bool read_bit(int infile, uint8_t *bit) {
    // One past the final byte of a short (last) read; -1 until a short
    // read is seen, which can never match the non-negative bit index.
    static int end = -1;

    if (!bitindex) {
        // Buffer exhausted (or first call): refill from the file.
        int reads = read_bytes(infile, bitbuff, BLOCK);
        if (reads < BLOCK) {
            // NOTE(review): 'end' is set one byte PAST the bytes actually
            // read, so true is reported one byte later than the last byte
            // read -- presumably matched by the stream's framing; confirm.
            end = reads + 1;
        }
    }
    *bit = get_bit(bitbuff, bitindex);
    bitindex = (bitindex + 1) % (BITS_PER_BYTE * BLOCK);
    // Fix: the comparison already yields a bool; the original
    // '... ? true : false' ternary was redundant.
    return bitindex == BITS_PER_BYTE * end;
}
/*---------------------------------------------------------------------------
 * randomLevel - Returns a random level in the range 0..MaxLevel.
 *
 * Consumes the pooled random bits two at a time: a level is added whenever
 * both bits are zero (probability 1/4, the classic skip-list p = 0.25
 * distribution) and the loop stops on the first nonzero pair. The pool is
 * refilled from random() when randomsLeft runs out.
 *
 * Fix: dropped the archaic 'register' storage-class hints (no-ops on
 * modern compilers, discouraged style) and braced the loop bodies.
 *---------------------------------------------------------------------------*/
static int randomLevel(void)
{
    int level = 0;
    int b;

    do {
        b = randomBits & 3;          /* take the next two pooled bits */
        if (!b) {
            level++;                 /* both zero: promote one level */
        }
        randomBits >>= 2;
        if (--randomsLeft == 0) {    /* pool exhausted: refill */
            randomBits = random();
            randomsLeft = BitsInRandom / 2;
        }
    } while (!b);

    return level > MaxLevel ? MaxLevel : level;
}
/**
 * Sets the enmState member atomically.
 *
 * Used for all updates.
 *
 * @param pThis The instance.
 * @param enmNewState The new value.
 *
 * @note The old state is read (vboxNetAdpGetState) only inside the Log
 *       statement, so it is presumably not evaluated in builds where
 *       logging is compiled out -- keep it inside the Log arguments.
 */
DECLINLINE(void) vboxNetAdpSetState(PVBOXNETADP pThis, VBOXNETADPSTATE enmNewState)
{
    Log(("vboxNetAdpSetState: pThis=%p, state change: %d -> %d.\n", pThis, vboxNetAdpGetState(pThis), enmNewState));
    /* Plain 32-bit atomic store of the enum's integer representation. */
    ASMAtomicWriteU32((uint32_t volatile *)&pThis->enmState, enmNewState);
}