content
stringlengths
19
48.2k
/*
 * Return TRUE if redrawing should currently be done.
 */
int redrawing()
{
    /* Redrawing is suppressed entirely while RedrawingDisabled is set. */
    if (RedrawingDisabled)
        return 0;

    /* With 'lazyredraw' set, postpone the redraw while typed-ahead input
     * is pending — unless the user typed it or a redraw was forced. */
    if (p_lz && char_avail() && !KeyTyped && !do_redraw)
        return 0;

    return 1;
}
/**
 * attribute_container_add_device - see if any container is interested in dev
 *
 * @dev: device to add attributes to
 * @fn: function to trigger addition of class device.
 *
 * This function allocates storage for the class device(s) to be
 * attached to dev (one for each matching attribute_container).  If no
 * fn is provided, the code will simply register the class device via
 * class_device_add.  If a function is provided, it is expected to add
 * the class device at the appropriate time.  One of the things that
 * might be necessary is to allocate and initialise the classdev and
 * then add it a later time.  To do this, call this routine for
 * allocation and initialisation and then use
 * attribute_container_device_trigger() to call class_device_add() on
 * it.  Note: after this, the class device contains a reference to dev
 * which is not relinquished until the release of the classdev.
 */
void
attribute_container_add_device(struct device *dev,
			       int (*fn)(struct attribute_container *,
					 struct device *,
					 struct class_device *))
{
	struct attribute_container *cont;

	mutex_lock(&attribute_container_mutex);
	list_for_each_entry(cont, &attribute_container_list, node) {
		struct internal_container *ic;

		/* Containers without classdevs have nothing to allocate. */
		if (attribute_container_no_classdevs(cont))
			continue;
		/* Let the container decide whether it cares about this dev. */
		if (!cont->match(cont, dev))
			continue;
		ic = kzalloc(sizeof(*ic), GFP_KERNEL);
		if (!ic) {
			dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
			/* Best effort: keep trying the remaining containers. */
			continue;
		}
		ic->cont = cont;
		class_device_initialize(&ic->classdev);
		/* The classdev pins dev until attribute_container_release. */
		ic->classdev.dev = get_device(dev);
		ic->classdev.class = cont->class;
		cont->class->release = attribute_container_release;
		strcpy(ic->classdev.class_id, dev->bus_id);
		if (fn)
			/* Caller registers the classdev at its own pace. */
			fn(cont, dev, &ic->classdev);
		else
			attribute_container_add_class_device(&ic->classdev);
		klist_add_tail(&ic->node, &cont->containers);
	}
	mutex_unlock(&attribute_container_mutex);
}
#include "../../git-compat-util.h"

/*
 * Return the length (in bytes) of the DOS drive prefix at the start of
 * `path` ("C:" -> 2, a multi-byte `subst` drive -> up to 4), or 0 if
 * there is none.
 */
int win32_has_dos_drive_prefix(const char *path)
{
	int i;

	/*
	 * Does it start with an ASCII letter (i.e. highest bit not set),
	 * followed by a colon?
	 */
	/* NOTE(review): this branch accepts any non-high-bit first byte,
	 * not only letters — presumably intentional given the `subst`
	 * discussion below; confirm. */
	if (!(0x80 & (unsigned char)*path))
		return *path && path[1] == ':' ? 2 : 0;

	/*
	 * While drive letters must be letters of the English alphabet, it is
	 * possible to assign virtually _any_ Unicode character via `subst` as
	 * a drive letter to "virtual drives". Even `1`, or `ä`. Or fun stuff
	 * like this:
	 *
	 *      subst ֍: %USERPROFILE%\Desktop
	 */
	for (i = 1; i < 4 && (0x80 & (unsigned char)path[i]); i++)
		; /* skip first UTF-8 character */
	return path[i] == ':' ? i + 1 : 0;
}

/*
 * Advance *path past any DOS drive prefix; return the number of bytes
 * skipped (0 if there was no prefix).
 */
int win32_skip_dos_drive_prefix(char **path)
{
	int ret = has_dos_drive_prefix(*path);

	*path += ret;
	return ret;
}

/*
 * Return the offset of the first path component after a drive or UNC
 * ("\\server\share") prefix, including a following directory separator
 * if present; 0 on a malformed UNC path.
 */
int win32_offset_1st_component(const char *path)
{
	char *pos = (char *)path;

	/* unc paths */
	if (!skip_dos_drive_prefix(&pos) &&
	    is_dir_sep(pos[0]) && is_dir_sep(pos[1])) {
		/* skip server name */
		pos = strpbrk(pos + 2, "\\/");
		if (!pos)
			return 0; /* Error: malformed unc path */

		/* skip share name */
		do {
			pos++;
		} while (*pos && !is_dir_sep(*pos));
	}

	return pos + is_dir_sep(*pos) - path;
}
/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* Wait for all in-flight transactions to drain. */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* Flush remaining metadata to disk. */
	xfs_quiesce_fs(mp);

	/* No new transactions can start; the count must now be stable at 0. */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Log the superblock counters; warn (but continue) on failure since
	 * the quiesce itself must still complete. */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/* Write an unmount record and push the superblock out. */
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H

/* Map the selected Kconfig MMU variant onto a single numeric version macro. */
#if defined(CONFIG_ARC_MMU_V1)
#define CONFIG_ARC_MMU_VER 1
#elif defined(CONFIG_ARC_MMU_V2)
#define CONFIG_ARC_MMU_VER 2
#elif defined(CONFIG_ARC_MMU_V3)
#define CONFIG_ARC_MMU_VER 3
#endif

/* MMU Management regs (auxiliary-register addresses) */
#define ARC_REG_MMU_BCR		0x06f
#define ARC_REG_TLBPD0		0x405
#define ARC_REG_TLBPD1		0x406
#define ARC_REG_TLBINDEX	0x407
#define ARC_REG_TLBCOMMAND	0x408
#define ARC_REG_PID		0x409
#define ARC_REG_SCRATCH_DATA0	0x418

/* Bits in MMU PID register */
#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */

/* Error code if probe fails */
#define TLB_LKUP_ERR		0x80000000

/* Probe found a duplicate entry (error bit plus duplicate flag). */
#define TLB_DUP_ERR	(TLB_LKUP_ERR | 0x00000001)

/* TLB Commands (written to ARC_REG_TLBCOMMAND) */
#define TLBWrite    0x1
#define TLBRead     0x2
#define TLBGetIndex 0x3
#define TLBProbe    0x4

/* The "no-invalidate" write and explicit uTLB invalidate only exist on
 * MMU v2 and later. */
#if (CONFIG_ARC_MMU_VER >= 2)
#define TLBWriteNI  0x5		/* write JTLB without inv uTLBs */
#define TLBIVUTLB   0x6		/* explicitly inv uTLBs */
#endif

#ifndef __ASSEMBLY__

/* Per-mm MMU context: one ASID slot per CPU. */
typedef struct {
	unsigned long asid[NR_CPUS];	/* 8 bit MMU PID + Generation cycle */
} mm_context_t;

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
void tlb_paranoid_check(unsigned int mm_asid, unsigned long address);
#else
/* Compiles away when TLB paranoia debugging is disabled. */
#define tlb_paranoid_check(a, b)
#endif

void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
void read_decode_mmu_bcr(void);

#endif	/* !__ASSEMBLY__ */

#endif
/*
 * The following inlined functions return an attribute of the interface with
 * the given interface number.
 */

/* Copy the 6-byte Ethernet hardware address of interface `ifnum` into
 * `ethaddr`.  `ifnum` must be a valid interface number (asserted). */
static inline void if_get_ethernet(int ifnum, ethaddr_t ethaddr)
{
    if_entry_t* ife;

    ASSERT_IFNUM(ifnum);
    ife = IPTABLE_GETIFP(ifnum);
    memcpy(ethaddr, ife->ethaddr, 6);  /* 6 = Ethernet MAC address length */
}
/**
 * @brief Initialize a ADUC_InstallInfo object. Caller must free using ADUC_InstallInfo_UnInit().
 *
 * @param info Object to initialize.
 * @param workFolder Sandbox to use for install, can be NULL.
 * @return _Bool True on success.
 */
_Bool ADUC_InstallInfo_Init(ADUC_InstallInfo* info, const char* workFolder)
{
    _Bool succeeded = false;

    // Zero the whole object first so UnInit() is safe on any exit path.
    memset(info, 0, sizeof(*info));

    if (workFolder != NULL)
    {
        // Takes a heap copy of workFolder; freed by ADUC_InstallInfo_UnInit().
        if (mallocAndStrcpy_s(&(info->WorkFolder), workFolder) != 0)
        {
            goto done;
        }
    }

    succeeded = true;

done:
    if (!succeeded)
    {
        // Roll back any partially-initialized members.
        ADUC_InstallInfo_UnInit(info);
    }

    return succeeded;
}
/***********************************************************************
// LZO1A compress public entry point.
************************************************************************/

/* Compress `in_len` bytes from `in` into `out`, storing the compressed
 * size in *out_len.  `wrkmem` is caller-provided working memory.
 * Returns LZO_E_OK or an LZO error code. */
LZO_PUBLIC(int)
lzo1a_compress ( const lzo_bytep in , lzo_uint  in_len,
                 lzo_bytep out, lzo_uintp out_len,
                 lzo_voidp wrkmem )
{
    int r = LZO_E_OK;

#if defined(LZO_COLLECT_STATS)
    /* Reset the stats block and record the compile-time tuning parameters. */
    lzo_memset(lzo_stats,0,sizeof(*lzo_stats));
    lzo_stats->rbits = RBITS;
    lzo_stats->clevel = CLEVEL;
    lzo_stats->dbits = DBITS;
    lzo_stats->lbits = LBITS;
    lzo_stats->min_match_short = MIN_MATCH_SHORT;
    lzo_stats->max_match_short = MAX_MATCH_SHORT;
    lzo_stats->min_match_long = MIN_MATCH_LONG;
    lzo_stats->max_match_long = MAX_MATCH_LONG;
    lzo_stats->min_offset = MIN_OFFSET;
    lzo_stats->max_offset = MAX_OFFSET;
    lzo_stats->r0min = R0MIN;
    lzo_stats->r0fast = R0FAST;
    lzo_stats->r0max = R0MAX;
    lzo_stats->in_len = in_len;
#endif

    /* NOTE(review): lzo_uint is presumably unsigned, so `<= 0` is
     * effectively an empty-input check — confirm against lzoconf.h. */
    if (in_len <= 0)
        *out_len = 0;
    else if (in_len <= MIN_MATCH_LONG + DVAL_LEN + 1)
    {
        /* Input too short for match finding: emit it as one literal run
         * (or report incompressible, depending on build configuration). */
#if defined(LZO_RETURN_IF_NOT_COMPRESSIBLE)
        r = LZO_E_NOT_COMPRESSIBLE;
#else
        *out_len = pd(store_run(out,in,in_len), out);
#endif
    }
    else
        r = do_compress(in,in_len,out,out_len,wrkmem);

#if defined(LZO_COLLECT_STATS)
    /* R1 matches were double-counted as short matches; correct the tallies. */
    lzo_stats->short_matches -= lzo_stats->r1_matches;
    lzo_stats->short_match[MIN_MATCH] -= lzo_stats->r1_matches;
    lzo_stats->out_len = *out_len;
#endif

    return r;
}
/// Return the index of the min level vertex inline int minLevelVertex() { int level = getVl(0); int index = 0; if (getVl(1) < level) { level = getVl(1); index = 1; } if (getVl(2) < level) { level = getVl(2); index = 2; } return index; }
/* Called once from parse.y if we are going to use readline. */
void
initialize_readline ()
{
  if (bash_readline_initialized)
    return;

  /* Basic readline setup: terminal, streams, and application name
     (used to read Bash-specific inputrc settings). */
  rl_terminal_name = get_string_value ("TERM");
  rl_instream = stdin;
  rl_outstream = stderr;
  rl_readline_name = "Bash";

  /* Register bash's own editing commands and bind them into the
     emacs meta / C-x keymaps. */
  rl_add_defun ("shell-expand-line", (Function *)shell_expand_line, -1);
  rl_bind_key_in_map (CTRL('E'), (Function *)shell_expand_line, emacs_meta_keymap);

  rl_add_defun ("history-expand-line", (Function *)history_expand_line, -1);
  rl_bind_key_in_map ('^', (Function *)history_expand_line, emacs_meta_keymap);

  rl_add_defun ("insert-last-argument", rl_yank_last_arg, -1);

  rl_add_defun ("operate-and-get-next", (Function *)operate_and_get_next, CTRL('O'));

  rl_add_defun ("display-shell-version", (Function *)display_shell_version, -1);
  rl_bind_key_in_map (CTRL ('V'), (Function *)display_shell_version, emacs_ctlx_keymap);

  /* Remove the default meta C-j / C-m bindings. */
  rl_unbind_key_in_map (CTRL('J'), emacs_meta_keymap);
  rl_unbind_key_in_map (CTRL('M'), emacs_meta_keymap);

#if defined (VI_MODE)
  rl_unbind_key_in_map (CTRL('E'), vi_movement_keymap);
#endif

#if defined (BRACE_COMPLETION)
  rl_add_defun ("complete-into-braces", bash_brace_completion, -1);
  rl_bind_key_in_map ('{', bash_brace_completion, emacs_meta_keymap);
#endif

#if defined (SPECIFIC_COMPLETION_FUNCTIONS)
  /* Type-specific completion: M-<char> completes, C-x <char> lists the
     possible completions. */
  rl_add_defun ("complete-filename", bash_complete_filename, -1);
  rl_bind_key_in_map ('/', bash_complete_filename, emacs_meta_keymap);
  rl_add_defun ("possible-filename-completions", bash_possible_filename_completions, -1);
  rl_bind_key_in_map ('/', bash_possible_filename_completions, emacs_ctlx_keymap);

  rl_add_defun ("complete-username", bash_complete_username, -1);
  rl_bind_key_in_map ('~', bash_complete_username, emacs_meta_keymap);
  rl_add_defun ("possible-username-completions", bash_possible_username_completions, -1);
  rl_bind_key_in_map ('~', bash_possible_username_completions, emacs_ctlx_keymap);

  rl_add_defun ("complete-hostname", bash_complete_hostname, -1);
  rl_bind_key_in_map ('@', bash_complete_hostname, emacs_meta_keymap);
  rl_add_defun ("possible-hostname-completions", bash_possible_hostname_completions, -1);
  rl_bind_key_in_map ('@', bash_possible_hostname_completions, emacs_ctlx_keymap);

  rl_add_defun ("complete-variable", bash_complete_variable, -1);
  rl_bind_key_in_map ('$', bash_complete_variable, emacs_meta_keymap);
  rl_add_defun ("possible-variable-completions", bash_possible_variable_completions, -1);
  rl_bind_key_in_map ('$', bash_possible_variable_completions, emacs_ctlx_keymap);

  rl_add_defun ("complete-command", bash_complete_command, -1);
  rl_bind_key_in_map ('!', bash_complete_command, emacs_meta_keymap);
  rl_add_defun ("possible-command-completions", bash_possible_command_completions, -1);
  rl_bind_key_in_map ('!', bash_possible_command_completions, emacs_ctlx_keymap);

  rl_add_defun ("glob-expand-word", bash_glob_expand_word, -1);
  rl_add_defun ("glob-list-expansions", bash_glob_list_expansions, -1);
  rl_bind_key_in_map ('*', bash_glob_expand_word, emacs_ctlx_keymap);
  rl_bind_key_in_map ('g', bash_glob_list_expansions, emacs_ctlx_keymap);
#endif

  rl_add_defun ("dynamic-complete-history", dynamic_complete_history, -1);
  rl_bind_key_in_map (TAB, dynamic_complete_history, emacs_meta_keymap);

  /* Hook bash into readline's programmable completion machinery. */
  rl_attempted_completion_function = (CPPFunction *)attempt_shell_completion;
  rl_directory_completion_hook = bash_directory_completion_hook;
  rl_ignore_some_completions_function = (Function *)filename_completion_ignore;

#if defined (VI_MODE)
  rl_bind_key_in_map ('v', vi_edit_and_execute_command, vi_movement_keymap);
# if defined (ALIAS)
  rl_bind_key_in_map ('@', posix_edit_macros, vi_movement_keymap);
# endif
#endif

  /* Filename quoting/dequoting for completion. */
  rl_completer_quote_characters = "'\"";
  enable_hostname_completion (perform_hostname_completion);
  rl_filename_quote_characters = " \t\n\\\"'@<>=;|&()#$`?*[!";
  rl_filename_quoting_function = bash_quote_filename;
  rl_filename_dequoting_function = bash_dequote_filename;
  rl_char_is_quoted_p = char_is_quoted;

  if (posixly_correct)
    posix_readline_initialize (1);

  bash_readline_initialized = 1;
}
/******************************************************************************
 * configuration.h
 *
 * Source of KaHIP -- Karlsruhe High Quality Partitioning.
 *
 *****************************************************************************/

#ifndef CONFIGURATION_3APG5V7Z
#define CONFIGURATION_3APG5V7Z

#include "partition/partition_config.h"

// Presets that fill a PartitionConfig with one of KaHIP's predefined
// quality/speed trade-offs.  standard() assigns a default for every tunable;
// the other presets call it (directly or via another preset) and then
// override selected fields.
class configuration {
        public:
                configuration() {} ;
                virtual ~configuration() {};

                // Highest-quality (slowest) preset.
                void strong( PartitionConfig & config );
                // Balanced quality/speed preset.
                void eco( PartitionConfig & config );
                // Fastest preset.
                void fast( PartitionConfig & config );

                // Baseline defaults used by every other preset.
                void standard( PartitionConfig & config );
                // Shared overrides for the social-network (snw) presets.
                void standardsnw( PartitionConfig & config );

                // Social-network variants of fast/eco/strong.
                void fastsocial( PartitionConfig & config );
                void ecosocial( PartitionConfig & config );
                void strongsocial( PartitionConfig & config );
};

// strong: start from defaults, then trade time for quality (better matching,
// deeper local search, flow refinement, full multigrid).
inline void configuration::strong( PartitionConfig & partition_config ) {
        standard(partition_config);
        partition_config.matching_type                  = MATCHING_GPA;
        partition_config.permutation_quality            = PERMUTATION_QUALITY_GOOD;
        partition_config.permutation_during_refinement  = PERMUTATION_QUALITY_GOOD;
        partition_config.edge_rating_tiebreaking        = true;
        partition_config.fm_search_limit                = 5;
        partition_config.bank_account_factor            = 3;
        partition_config.edge_rating                    = EXPANSIONSTAR2;
        partition_config.refinement_scheduling_algorithm = REFINEMENT_SCHEDULING_ACTIVE_BLOCKS_REF_KWAY;
        partition_config.refinement_type                = REFINEMENT_TYPE_FM_FLOW;
        partition_config.global_cycle_iterations        = 2;
        partition_config.flow_region_factor             = 8;
        partition_config.corner_refinement_enabled      = true;
        partition_config.kway_stop_rule                 = KWAY_ADAPTIVE_STOP_RULE;
        partition_config.kway_adaptive_limits_alpha     = 10;
        partition_config.kway_rounds                    = 10;
        partition_config.rate_first_level_inner_outer   = true;
        partition_config.use_wcycles                    = false;
        partition_config.no_new_initial_partitioning    = true;
        partition_config.use_fullmultigrid              = true;
        partition_config.most_balanced_minimum_cuts     = true;
        partition_config.local_multitry_fm_alpha        = 10;
        partition_config.local_multitry_rounds          = 10;
        partition_config.mh_initial_population_fraction = 10;
        partition_config.mh_flip_coin                   = 1;
        partition_config.epsilon                        = 3;
        partition_config.initial_partitioning_type      = INITIAL_PARTITIONING_RECPARTITION;
        partition_config.bipartition_tries              = 4;
        partition_config.minipreps                      = 4;
        partition_config.initial_partitioning_repetitions = 64;
        partition_config.strong                         = true;
}

// eco: start from defaults, then scale effort with k and cut down local
// search limits for speed.
inline void configuration::eco( PartitionConfig & partition_config ) {
        standard(partition_config);
        partition_config.eco = true;
        // Coarsening/refinement effort adapts to the number of blocks k.
        partition_config.aggressive_random_levels = std::max(2, (int)(7 - log2(partition_config.k)));
        partition_config.kway_rounds              = std::min(5, (int)log2(partition_config.k));

        partition_config.matching_type                  = MATCHING_RANDOM_GPA;
        partition_config.permutation_quality            = PERMUTATION_QUALITY_NONE;
        partition_config.permutation_during_refinement  = PERMUTATION_QUALITY_GOOD;
        partition_config.edge_rating                    = EXPANSIONSTAR2;
        partition_config.fm_search_limit                = 1;
        partition_config.refinement_type                = REFINEMENT_TYPE_FM_FLOW;
        partition_config.flow_region_factor             = 2;
        partition_config.corner_refinement_enabled      = true;
        partition_config.kway_stop_rule                 = KWAY_SIMPLE_STOP_RULE;
        partition_config.kway_fm_search_limit           = 1;
        partition_config.mh_initial_population_fraction = 50;
        partition_config.mh_flip_coin                   = 1;
        partition_config.initial_partitioning_type      = INITIAL_PARTITIONING_RECPARTITION;
        partition_config.bipartition_tries              = 4;
        partition_config.minipreps                      = 4;
        partition_config.initial_partitioning_repetitions = 16;
}

// fast: start from defaults, then disable or minimize the expensive phases.
inline void configuration::fast( PartitionConfig & partition_config ) {
        standard(partition_config);
        partition_config.fast = true;
        // For many blocks, skip quotient-graph refinement entirely and rely
        // on cheap corner refinement instead.
        if(partition_config.k > 8) {
                partition_config.quotient_graph_refinement_disabled = true;
                partition_config.kway_fm_search_limit               = 0;
                partition_config.kway_stop_rule                     = KWAY_SIMPLE_STOP_RULE;
                partition_config.corner_refinement_enabled          = true;
        } else {
                partition_config.corner_refinement_enabled          = false;
        }
        partition_config.permutation_quality             = PERMUTATION_QUALITY_FAST;
        partition_config.permutation_during_refinement   = PERMUTATION_QUALITY_NONE;
        partition_config.matching_type                   = MATCHING_RANDOM_GPA;
        partition_config.aggressive_random_levels        = 4;
        partition_config.refinement_scheduling_algorithm = REFINEMENT_SCHEDULING_FAST;
        partition_config.edge_rating                     = EXPANSIONSTAR2;
        partition_config.fm_search_limit                 = 0;
        partition_config.bank_account_factor             = 1;
        partition_config.initial_partitioning_type       = INITIAL_PARTITIONING_RECPARTITION;
        partition_config.bipartition_tries               = 4;
        partition_config.minipreps                       = 1;
        partition_config.initial_partitioning_repetitions = 0;
}

// standard: assign a default to every tunable.  All other presets assume
// this has run first.
inline void configuration::standard( PartitionConfig & partition_config ) {
        // General run parameters.
        partition_config.filename_output              = "";
        partition_config.seed                         = 0;
        partition_config.fast                         = false;
        partition_config.eco                          = false;
        partition_config.strong                       = false;
        partition_config.imbalance                    = 3;

        // Coarsening / initial partitioning.
        partition_config.first_level_random_matching  = false;
        partition_config.initial_partitioning_repetitions = 5;
        partition_config.edge_rating_tiebreaking      = false;
        partition_config.edge_rating                  = WEIGHT;
        partition_config.matching_type                = MATCHING_RANDOM;
        partition_config.permutation_quality          = PERMUTATION_QUALITY_FAST;
        partition_config.initial_partitioning         = false;
        partition_config.initial_partitioning_type    = INITIAL_PARTITIONING_RECPARTITION;
        partition_config.bipartition_tries            = 9;
        partition_config.minipreps                    = 10;
        partition_config.enable_omp                   = false;
        partition_config.combine                      = false;
        partition_config.epsilon                      = 3;
        partition_config.buffoon                      = false;
        partition_config.balance_edges                = false;

        // Evolutionary/metaheuristic (mh) parameters.
        partition_config.time_limit                   = 0;
        partition_config.mh_pool_size                 = 5;
        partition_config.mh_plain_repetitions         = false;
        partition_config.no_unsuc_reps                = 10;
        partition_config.local_partitioning_repetitions = 1;
        partition_config.mh_disable_nc_combine        = false;
        partition_config.mh_disable_cross_combine     = false;
        partition_config.mh_disable_combine           = false;
        partition_config.mh_enable_quickstart         = false;
        partition_config.mh_disable_diversify_islands = false;
        partition_config.mh_diversify                 = true;
        partition_config.mh_diversify_best            = false;
        partition_config.mh_cross_combine_original_k  = false;
        partition_config.mh_enable_tournament_selection = true;
        partition_config.mh_initial_population_fraction = 10;
        partition_config.mh_flip_coin                 = 1;
        partition_config.mh_print_log                 = false;
        partition_config.mh_penalty_for_unconnected   = false;
        partition_config.mh_no_mh                     = false;
        partition_config.mh_optimize_communication_volume = false;
        partition_config.use_bucket_queues            = true;
        partition_config.walshaw_mh_repetitions       = 50;
        partition_config.scaleing_factor              = 1;
        partition_config.scale_back                   = false;
        partition_config.initial_partition_optimize_fm_limits        = 20;
        partition_config.initial_partition_optimize_multitry_fm_alpha = 20;
        partition_config.initial_partition_optimize_multitry_rounds  = 100;
        partition_config.suppress_partitioner_output  = false;

        // Bipartitioning limits depend on the number of blocks k.
        if( partition_config.k <= 4 ) {
                partition_config.bipartition_post_fm_limits = 30;
                partition_config.bipartition_post_ml_limits = 6;
        } else {
                partition_config.bipartition_post_fm_limits = 25;
                partition_config.bipartition_post_ml_limits = 5;
        }

        // Refinement parameters.
        partition_config.disable_max_vertex_weight_constraint = false;
        partition_config.permutation_during_refinement = PERMUTATION_QUALITY_GOOD;
        partition_config.fm_search_limit              = 5;
        // NOTE(review): use_bucket_queues was set to true above and is
        // overridden here — the later `false` wins; confirm intent.
        partition_config.use_bucket_queues            = false;
        partition_config.bank_account_factor          = 1.5;
        partition_config.refinement_scheduling_algorithm = REFINEMENT_SCHEDULING_ACTIVE_BLOCKS;
        partition_config.rate_first_level_inner_outer = false;
        partition_config.match_islands                = false;
        partition_config.refinement_type              = REFINEMENT_TYPE_FM;
        partition_config.flow_region_factor           = 4.0;
        partition_config.aggressive_random_levels     = 3;
        partition_config.refined_bubbling             = true;
        partition_config.corner_refinement_enabled    = false;
        partition_config.bubbling_iterations          = 1;
        partition_config.kway_rounds                  = 1;
        partition_config.quotient_graph_refinement_disabled = false;
        partition_config.kway_fm_search_limit         = 3;
        partition_config.global_cycle_iterations      = 1;
        partition_config.softrebalance                = false;
        partition_config.rebalance                    = false;
        partition_config.use_wcycles                  = false;
        partition_config.stop_rule                    = STOP_RULE_SIMPLE;
        partition_config.num_vert_stop_factor         = 20;
        partition_config.level_split                  = 2;
        partition_config.no_new_initial_partitioning  = true;
        partition_config.omit_given_partitioning      = false;
        partition_config.use_fullmultigrid            = false;
        partition_config.kway_stop_rule               = KWAY_SIMPLE_STOP_RULE;
        partition_config.kway_adaptive_limits_alpha   = 1.0;
        partition_config.max_flow_iterations          = 10;
        partition_config.no_change_convergence        = false;
        partition_config.compute_vertex_separator     = false;
        partition_config.toposort_iterations          = 4;
        partition_config.initial_partition_optimize   = false;
        partition_config.most_balanced_minimum_cuts   = false;
        partition_config.gpa_grow_paths_between_blocks = true;

        //used for the kaffpa paper
        partition_config.bipartition_algorithm        = BIPARTITION_BFS;
        partition_config.local_multitry_rounds        = 1;
        partition_config.local_multitry_fm_alpha      = 10;
        partition_config.only_first_level             = false;
        partition_config.use_balance_singletons       = true;
        partition_config.disable_hard_rebalance       = false;
        partition_config.amg_iterations               = 5;
        partition_config.kaffpa_perfectly_balance     = false;
        partition_config.remove_negative_cycles       = false;

        // KaBaPE (balanced partitioning) parameters.
        partition_config.cycle_refinement_algorithm   = CYCLE_REFINEMENT_ALGORITHM_ULTRA_MODEL;
        partition_config.kabaE_internal_bal           = 0.01;
        partition_config.kaba_packing_iterations      = 20;
        partition_config.kaba_flip_packings           = false;
        partition_config.kaba_lsearch_p               = NOCOIN_RNDTIE;
        partition_config.kaffpa_perfectly_balanced_refinement = false;
        partition_config.kaba_enable_zero_weight_cycles = true;
        partition_config.mh_enable_gal_combine        = false;
        partition_config.mh_easy_construction         = false;
        partition_config.maxT                         = 100;
        partition_config.maxIter                      = 500000;
        if( partition_config.k <= 8 ) {
                partition_config.kaba_internal_no_aug_steps_aug = 15;
        } else {
                partition_config.kaba_internal_no_aug_steps_aug = 7;
        }
        partition_config.kaba_unsucc_iterations       = 6;
        partition_config.initial_bipartitioning       = false;
        partition_config.kabapE                       = false;

#ifdef MODE_BUFFOON
        // Buffoon build: stronger local search by default.
        partition_config.mh_diversify                 = true;
        partition_config.kway_fm_search_limit         = 10;
        partition_config.fm_search_limit              = 10;
        partition_config.initial_partition_optimize   = true;
        partition_config.nc_div                       = 2;
        partition_config.nc_b                         = 1;
#endif

        // social networking parameters
        partition_config.cluster_coarsening_factor    = 18;
        partition_config.ensemble_clusterings         = false;
        partition_config.label_iterations             = 10;
        partition_config.label_iterations_refinement  = 25;
        partition_config.number_of_clusterings        = 1;
        partition_config.label_propagation_refinement = false;
        partition_config.balance_factor               = 0;
        partition_config.cluster_coarsening_during_ip = false;
        partition_config.set_upperbound               = true;
        partition_config.repetitions                  = 1;
        partition_config.node_ordering                = DEGREE_NODEORDERING;
}

// standardsnw: overrides shared by all social-network presets; the number
// of clusterings shrinks as the number of blocks k grows.
inline void configuration::standardsnw( PartitionConfig & partition_config ) {
        partition_config.matching_type        = CLUSTER_COARSENING;
        partition_config.stop_rule            = STOP_RULE_MULTIPLE_K;
        partition_config.num_vert_stop_factor = 5000;

        if(2 <= partition_config.k && partition_config.k <= 3) {
                partition_config.number_of_clusterings = 18;
        } else if(4 <= partition_config.k && partition_config.k <= 7) {
                partition_config.number_of_clusterings = 17;
        } else if(8 <= partition_config.k && partition_config.k <= 15) {
                partition_config.number_of_clusterings = 15;
        } else if(16 <= partition_config.k && partition_config.k <= 31) {
                partition_config.number_of_clusterings = 7;
        } else {
                partition_config.number_of_clusterings = 3;
        }

        partition_config.balance_factor = 0.016;
        if( partition_config.k <= 8 ) {
                partition_config.balance_factor = 0.00;
        }
}

// fastsocial: eco base + snw overrides, using label propagation refinement.
inline void configuration::fastsocial( PartitionConfig & partition_config ) {
        eco(partition_config);
        standardsnw(partition_config);
        partition_config.label_propagation_refinement = true;
        partition_config.cluster_coarsening_during_ip = true;
        partition_config.balance_factor               = 0;
}

// ecosocial: eco base + snw overrides with extra global cycles.
inline void configuration::ecosocial( PartitionConfig & partition_config ) {
        eco(partition_config);
        standardsnw(partition_config);
        partition_config.label_propagation_refinement = false;
        partition_config.global_cycle_iterations      = 3;
        partition_config.use_wcycles                  = false;
        partition_config.no_new_initial_partitioning  = true;
        partition_config.balance_factor               = 0.016;
        partition_config.cluster_coarsening_during_ip = true;
}

// strongsocial: strong base + snw overrides with ensemble clusterings.
inline void configuration::strongsocial( PartitionConfig & partition_config ) {
        strong(partition_config);
        standardsnw(partition_config);
        partition_config.label_propagation_refinement = false;
        partition_config.cluster_coarsening_during_ip = true;
        partition_config.ensemble_clusterings         = true;
}

#endif /* end of include guard: CONFIGURATION_3APG5V7Z */
/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation.  This is
 * only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* Checksum state is unexpected for this path: warn once. */
		skb_warn_bad_offload(skb);

		/* Header must be writable before segmentation modifies it. */
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	/* Remember the mac header offset so segments can restore it. */
	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI sockets. */ #include <linux/module.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <net/sock.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #ifndef CONFIG_BT_HCI_SOCK_DEBUG #undef BT_DBG #define BT_DBG(D...) 
#endif

/* ----- HCI socket interface ----- */

/* Test bit `nr` in a bitfield of packed 32-bit words starting at `addr`. */
static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
/* Default allow-lists applied to sockets whose owner lacks CAP_NET_RAW:
 * bitmaps of permitted packet types, events and per-OGF command OCFs. */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x020154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x000000, 0x00 }
	}
};

/* List of all open HCI sockets, guarded by its read/write lock. */
static struct bt_sock_list hci_sk_list = {
	.lock = RW_LOCK_UNLOCKED
};

/* Send frame to RAW socket */
/* Deliver a copy of @skb to every bound raw socket on @hdev that passes
 * its per-socket filter.  Called with the skb still owned by the caller. */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);
	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		/* Vendor packets are filtered under type bit 0. */
		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
				&flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			/* If an opcode filter is set, only pass completions/
			 * status events for that opcode (offsets 3/4 hold the
			 * opcode inside those event payloads). */
			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode != get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		/* Put type byte before the data */
		memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&hci_sk_list.lock);
}

/* Release a socket: drop the device promiscuous count/reference, unlink
 * from the global list and purge any queued skbs. */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		/* Devices marked raw-only cannot leave raw mode. */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCISETSECMGR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (arg)
			set_bit(HCI_SECMGR, &hdev->flags);
		else
			clear_bit(HCI_SECMGR, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	default:
		/* Fall back to a driver-specific ioctl handler if present. */
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

/* Top-level ioctl dispatch: global commands first, device-bound ones
 * (under the socket lock) as the default case. */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *)arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

/* Bind the socket to a device id (or HCI_DEV_NONE) and mark the device
 * promiscuous so it forwards all traffic to raw sockets. */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!haddr || haddr->hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (hci_pi(sk)->hdev) {
		err = -EALREADY;
		goto done;
	}

	if (haddr->hci_dev != HCI_DEV_NONE) {
		if (!(hdev = hci_dev_get(haddr->hci_dev))) {
			err = -ENODEV;
			goto done;
		}

		atomic_inc(&hdev->promisc);
	}

	hci_pi(sk)->hdev = hdev;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

/* Report the bound device id back to user space. */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev   = hdev->id;

	release_sock(sk);
	return 0;
}

/* Attach direction and timestamp ancillary data per the socket's
 * cmsg mask. */
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
		struct timeval tv;
		skb_get_timestamp(skb, &tv);
		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(tv), &tv);
	}
}

/* Datagram receive: dequeue one skb, truncating to the user's buffer. */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	if (!(skb = skb_recv_datagram(sk, flags, noblock, &err)))
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	hci_sock_cmsg(sk, msg, skb);

	skb_free_datagram(sk, skb);

	/* GNU ?: — on success return the number of bytes copied. */
	return err ? : copied;
}

/* Send a raw frame to the bound device.  The first byte of the payload
 * is the HCI packet type; commands are checked against the security
 * filter unless the caller has CAP_NET_RAW. */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	if (!(hdev = hci_pi(sk)->hdev)) {
		err = -EBADFD;
		goto done;
	}

	if (!(skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* Strip the leading type byte off into the control block. */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = __le16_to_cpu(get_unaligned((__le16 *) skb->data));
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
					!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Raw devices and vendor commands bypass the command queue. */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == OGF_VENDOR_CMD)) {
			skb_queue_tail(&hdev->raw_q, skb);
			hci_sched_tx(hdev);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		hci_sched_tx(hdev);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

/* Set per-socket options: cmsg flags and the receive filter (which is
 * clamped to the security filter for unprivileged callers). */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-seed uf with the current filter so a short copy from
		 * user space leaves the remaining fields unchanged. */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

/* Read back per-socket options set by hci_sock_setsockopt(). */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			return -EFAULT;
		break;

	default:
		return -ENOPROTOOPT;
		break;
	}

	return 0;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

/* Create a new raw HCI socket in the BT_OPEN state. */
static int hci_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

/* Device notifier: broadcast register/unregister events to sockets and
 * detach any socket bound to a device that is going away. */
static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct hci_dev *hdev = (struct hci_dev *) ptr;
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %ld", hdev->name, event);

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			local_bh_disable();
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
			local_bh_enable();
		}
		read_unlock(&hci_sk_list.lock);
	}

	return NOTIFY_DONE;
}

static struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

static struct notifier_block hci_sock_nblock = {
	.notifier_call = hci_sock_dev_event
};

/* Register the HCI socket family and the device notifier. */
int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	hci_register_notifier(&hci_sock_nblock);

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

/* Unregister everything set up by hci_sock_init(). */
int __exit hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	hci_unregister_notifier(&hci_sock_nblock);

	proto_unregister(&hci_sk_proto);

	return 0;
}
/*
 * Resizes the memory pointed to by ptr to size bytes.
 *
 * @param ptr Address of the memory region to resize.
 * @param size The minimum size to resize the memory to.
 *
 * @return If successful, the pointer to a valid region of memory is
 * returned, else NULL is returned and sf_errno is set appropriately.
 *
 * If sf_realloc is called with an invalid pointer sf_errno should be set to EINVAL.
 * If there is no memory available sf_realloc should set sf_errno to ENOMEM.
 *
 * If sf_realloc is called with a valid pointer and a size of 0 it should free
 * the allocated block and return NULL without setting sf_errno.
 */
void *sf_realloc(void *pp, size_t rsize) {
    /* NOTE(review): the header comment above says EINVAL should be set for an
     * invalid pointer, but the code aborts instead — presumably required by
     * the assignment spec; confirm against the grading rubric. */
    if(invalid_pointer_check(pp)){
        abort();
    }
    /* Size 0 behaves like free(), per the contract above. */
    if(rsize == 0){
        sf_free(pp);
        return NULL;
    }
    /* Recover the block header stored immediately before the payload. */
    sf_header *block_to_realloc = (sf_header*)((char *) pp - sizeof(sf_block_info));
    /* block_size is stored shifted; <<4 converts it to a byte count
     * (assumes 16-byte block granularity — TODO confirm in sfmm.h). */
    if((block_to_realloc->info.block_size << 4) < round_up(rsize)){
        /* Growing: allocate a larger block, copy the old payload, free old. */
        void *larger_block = sf_malloc(rsize);
        if(larger_block == NULL){
            /* sf_malloc already set sf_errno (ENOMEM). */
            return NULL;
        }
        memcpy(larger_block, pp, block_to_realloc->info.requested_size);
        sf_free(pp);
        return larger_block;
    }
    else if((block_to_realloc->info.block_size << 4) > round_up(rsize)){
        /* Shrinking in place. */
        if(splinter_check(round_up(rsize), block_to_realloc)){
            /* Splitting would leave an unusable splinter: keep the whole
             * block and only record the new requested size. */
            block_to_realloc->info.requested_size = rsize;
        }
        else{
            /* Split off the tail, coalesce it with any adjacent free block,
             * then insert the remainder into the size-matched free list. */
            block_to_realloc = split_allocated_block(rsize, round_up(rsize), block_to_realloc);
            sf_header *new_next_free = (sf_header *) ((char *) block_to_realloc + (block_to_realloc->info.block_size << 4));
            new_next_free = coalese(new_next_free);
            sf_free_list_node *best_fit_ptr = find_free_list_match(new_next_free->info.block_size << 4);
            if(best_fit_ptr == NULL){
                best_fit_ptr = create_free_list(new_next_free->info.block_size << 4);
            }
            sf_add_free_block(best_fit_ptr, new_next_free);
        }
        return allocate(block_to_realloc, rsize);
    }
    else{
        /* Rounded size unchanged: the existing block already fits. */
        return pp;
    }
}
/* Set object interface */

#ifndef Py_SETOBJECT_H
#define Py_SETOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

/*
There are three kinds of slots in the table:

1. Unused:  key == NULL
2. Active:  key != NULL and key != dummy
3. Dummy:   key == dummy

Note: .pop() abuses the hash field of an Unused or Dummy slot to
hold a search finger.  The hash field of Unused or Dummy slots has
no meaning otherwise.
*/

/* Minimum (and initial) table size; must be a power of 2. */
#define PySet_MINSIZE 8

typedef struct {
    long hash;      /* cached hash code for the entry key */
    PyObject *key;
} setentry;

/* This data structure is shared by set and frozenset objects. */

typedef struct _setobject PySetObject;
struct _setobject {
    PyObject_HEAD

    Py_ssize_t fill;  /* # Active + # Dummy */
    Py_ssize_t used;  /* # Active */

    /* The table contains mask + 1 slots, and that's a power of 2.
     * We store the mask instead of the size because the mask is more
     * frequently needed.
     */
    Py_ssize_t mask;

    /* table points to smalltable for small tables, else to
     * additional malloc'ed memory.  table is never NULL!  This rule
     * saves repeated runtime null-tests.
     */
    setentry *table;
    setentry *(*lookup)(PySetObject *so, PyObject *key, long hash);
    setentry smalltable[PySet_MINSIZE];

    long hash;                  /* only used by frozenset objects */
    PyObject *weakreflist;      /* List of weak references */
};

PyAPI_DATA(PyTypeObject) PySet_Type;
PyAPI_DATA(PyTypeObject) PyFrozenSet_Type;

/* Invariants for frozensets:
 *     data is immutable.
 *     hash is the hash of the frozenset or -1 if not computed yet.
 * Invariants for sets:
 *     hash is -1
 */

/* Exact-type checks (subclasses excluded). */
#define PyFrozenSet_CheckExact(ob) (Py_TYPE(ob) == &PyFrozenSet_Type)
#define PyAnySet_CheckExact(ob) \
    (Py_TYPE(ob) == &PySet_Type || Py_TYPE(ob) == &PyFrozenSet_Type)
/* Type checks that also accept subclasses. */
#define PyAnySet_Check(ob) \
    (Py_TYPE(ob) == &PySet_Type || Py_TYPE(ob) == &PyFrozenSet_Type || \
      PyType_IsSubtype(Py_TYPE(ob), &PySet_Type) || \
      PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type))
#define PySet_Check(ob) \
    (Py_TYPE(ob) == &PySet_Type || \
    PyType_IsSubtype(Py_TYPE(ob), &PySet_Type))
#define   PyFrozenSet_Check(ob) \
    (Py_TYPE(ob) == &PyFrozenSet_Type || \
      PyType_IsSubtype(Py_TYPE(ob), &PyFrozenSet_Type))

PyAPI_FUNC(PyObject *) PySet_New(PyObject *);
PyAPI_FUNC(PyObject *) PyFrozenSet_New(PyObject *);
PyAPI_FUNC(Py_ssize_t) PySet_Size(PyObject *anyset);
/* Fast size access without error checking; (so) must be a set/frozenset. */
#define PySet_GET_SIZE(so) (((PySetObject *)(so))->used)
PyAPI_FUNC(int) PySet_Clear(PyObject *set);
PyAPI_FUNC(int) PySet_Contains(PyObject *anyset, PyObject *key);
PyAPI_FUNC(int) PySet_Discard(PyObject *set, PyObject *key);
PyAPI_FUNC(int) PySet_Add(PyObject *set, PyObject *key);
PyAPI_FUNC(int) _PySet_Next(PyObject *set, Py_ssize_t *pos, PyObject **key);
PyAPI_FUNC(int) _PySet_NextEntry(PyObject *set, Py_ssize_t *pos, PyObject **key, long *hash);
PyAPI_FUNC(PyObject *) PySet_Pop(PyObject *set);
PyAPI_FUNC(int) _PySet_Update(PyObject *set, PyObject *iterable);

#ifdef __cplusplus
}
#endif
#endif /* !Py_SETOBJECT_H */
/*
 * Set protection contexts for bus masters.
 *
 * Walks the configuration array, configuring each bus master's protection
 * attributes and then its active protection context.  Stops at the first
 * failure and returns that status; returns CY_PROT_SUCCESS if every entry
 * was applied.
 */
cy_en_prot_status_t bus_masters_protect(cy_bus_master_config_t bus_masters_config_arr[], uint32_t arr_length)
{
    cy_en_prot_status_t status = CY_PROT_SUCCESS;
    uint32_t idx;

    for (idx = 0; idx < arr_length; idx++) {
        const cy_bus_master_config_t *cfg = &bus_masters_config_arr[idx];

        status = Cy_Prot_ConfigBusMaster(cfg->busMaster,
                                         cfg->privileged,
                                         cfg->secure,
                                         cfg->pcMask);
        if (status != CY_PROT_SUCCESS) {
            break;
        }

        status = Cy_Prot_SetActivePC(cfg->busMaster, cfg->act_pc);
        if (status != CY_PROT_SUCCESS) {
            break;
        }
    }

    return status;
}
/* Private user code ---------------------------------------------------------*/
/* USER CODE BEGIN 0 */
/*
 * Copy the latest ADC sample into the active ping-pong buffer.  Once 2048
 * samples have been stored, wrap the index, switch to the other buffer and
 * raise the processing flag; finally drive PC1 low.
 */
void ProcessDMADone(void)
{
  data_to_process[buffer_index][data_to_process_index++] = adc_buffer[8];

  if (data_to_process_index >= 2048)
  {
    data_to_process_index = 0;
    buffer_index = 1 - buffer_index; /* toggle between buffer 0 and 1 */
    BufferReadyToProcess = 1;
  }

  HAL_GPIO_WritePin(GPIOC, GPIO_PIN_1, 0);
}
/**
 * @brief Appends a copy of the string pointed to by src, into the string
 * pointed to by dest. The null-terminator in dest is overwritten by the first
 * character of src, and a null-terminator is included at the end of the new
 * string formed by the concatenation of both in dest.
 *
 * @param dest A pointer to the destination string (must have room for the
 *             concatenated result including the terminator)
 * @param src A pointer to the source string
 * @return char* dest
 */
char *strcat(char *restrict dest, const char *restrict src)
{
	/* Start writing at dest's terminator; computing strlen once here
	 * (instead of strlen(src) in a loop condition) is O(n), and copying
	 * through src's '\0' guarantees the result is NUL-terminated —
	 * the previous version never wrote the terminator. */
	char *d = dest + strlen(dest);

	while ((*d++ = *src++) != '\0')
		;

	return dest;
}
/* expert.h
 * Collecting of Expert information.
 *
 * For further info, see: https://wiki.wireshark.org/Development/ExpertInfo
 *
 * Wireshark - Network traffic analyzer
 * By Gerald Combs <gerald@wireshark.org>
 * Copyright 1998 Gerald Combs
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef __EXPERT_H__
#define __EXPERT_H__

#include <epan/packet_info.h>
#include <epan/proto.h>
#include "value_string.h"
#include "ws_symbol_export.h"

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/** only for internal and display use. */
typedef struct expert_info_s {
	guint32      packet_num;
	int          group;
	int          severity;
	int          hf_index; /* hf_index of the expert item. Might be -1. */
	const gchar *protocol;
	gchar       *summary;
	proto_item  *pitem;
} expert_info_t;

/* Expert Info and Display hf data */
typedef struct expert_field {
	int ei;   /* expert info id, assigned at registration */
	int hf;   /* display filter field id, assigned at registration */
} expert_field;

/* Initializer for a not-yet-registered expert_field. */
#define EI_INIT_EI -1
#define EI_INIT_HF -1
#define EI_INIT {EI_INIT_EI, EI_INIT_HF}

typedef struct expert_field_info {
	/* ---------- set by dissector --------- */
	const char       *name;
	int               group;
	int               severity;
	const gchar      *summary;

	/* ------- set by register routines (prefilled by EXPFILL macro, see below) ------ */
	int               id;
	const gchar      *protocol;
	int               orig_severity; /* Matches severity when registered, used to restore original severity
					  * if UAT severity entry is removed */
	hf_register_info  hf_info;
} expert_field_info;

/* Prefills the register-routine-owned fields of expert_field_info. */
#define EXPFILL 0, NULL, 0, \
	{0, {"Expert Info", NULL, FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL}}

typedef struct ei_register_info {
	expert_field      *ids;    /**< written to by register() function */
	expert_field_info  eiinfo; /**< the field info to be registered */
} ei_register_info;

typedef struct expert_module expert_module_t;

#define PRE_ALLOC_EXPERT_FIELDS_MEM 5000

/* "proto_expert" is exported from libwireshark.dll.
 * Thus we need a special declaration.
 */
WS_DLL_PUBLIC int proto_expert;

extern void expert_init(void);

extern void expert_packet_init(void);

extern void expert_cleanup(void);

extern void expert_packet_cleanup(void);

WS_DLL_PUBLIC int expert_get_highest_severity(void);

WS_DLL_PUBLIC void expert_update_comment_count(guint64 count);

/** Add an expert info.
 Add an expert info tree to a protocol item using registered expert info item
 @param pinfo Packet info of the currently processed packet. May be NULL if
        pi is supplied
 @param pi Current protocol item (or NULL)
 @param eiindex The registered expert info item
 */
WS_DLL_PUBLIC void expert_add_info(packet_info *pinfo, proto_item *pi, expert_field *eiindex);

/** Add an expert info.
 Add an expert info tree to a protocol item using registered expert info item,
 but with a formatted message.
 @param pinfo Packet info of the currently processed packet. May be NULL if
        pi is supplied
 @param pi Current protocol item (or NULL)
 @param eiindex The registered expert info item
 @param format Printf-style format string for additional arguments
 */
WS_DLL_PUBLIC void expert_add_info_format(packet_info *pinfo, proto_item *pi,
	expert_field *eiindex, const char *format, ...) G_GNUC_PRINTF(4, 5);

/** Add an expert info associated with some byte data
 Add an expert info tree to a protocol item using registered expert info item.
 This function is intended to replace places where a "text only" proto_tree_add_xxx
 API + expert_add_info would be used.
 @param tree Current protocol tree (or NULL)
 @param pinfo Packet info of the currently processed packet. May be NULL if tree
        is supplied
 @param eiindex The registered expert info item
 @param tvb the tv buffer of the current data
 @param start start of data in tvb
 @param length length of data in tvb
 @return the newly created item above expert info tree
 */
WS_DLL_PUBLIC proto_item *
proto_tree_add_expert(proto_tree *tree, packet_info *pinfo, expert_field *eiindex,
	tvbuff_t *tvb, gint start, gint length);

/** Add an expert info associated with some byte data
 Add an expert info tree to a protocol item, using registered expert info item,
 but with a formatted message.
 Add an expert info tree to a protocol item using registered expert info item.
 This function is intended to replace places where a "text only" proto_tree_add_xxx
 API + expert_add_info_format would be used.
 @param tree Current protocol tree (or NULL)
 @param pinfo Packet info of the currently processed packet. May be NULL if tree
        is supplied
 @param eiindex The registered expert info item
 @param tvb the tv buffer of the current data
 @param start start of data in tvb
 @param length length of data in tvb
 @param format Printf-style format string for additional arguments
 @return the newly created item above expert info tree
 */
WS_DLL_PUBLIC proto_item *
proto_tree_add_expert_format(proto_tree *tree, packet_info *pinfo, expert_field *eiindex,
	tvbuff_t *tvb, gint start, gint length, const char *format, ...) G_GNUC_PRINTF(7, 8);

/*
 * Register that a protocol has expert info.
 */
WS_DLL_PUBLIC expert_module_t *expert_register_protocol(int id);

/**
 * Deregister a expert info.
 */
void expert_deregister_expertinfo (const char *abbrev);

/**
 * Deregister expert info from a protocol.
 */
void expert_deregister_protocol (expert_module_t *module);

/**
 * Free deregistered expert infos.
 */
void expert_free_deregistered_expertinfos (void);

/**
 * Get summary text of an expert_info field.
 * This is intended for use in expert_add_info_format or proto_tree_add_expert_format
 * to get the "base" string to then append additional information
 */
WS_DLL_PUBLIC const gchar* expert_get_summary(expert_field *eiindex);

/** Register a expert field array.
 @param module the protocol handle from expert_register_protocol()
 @param ei the ei_register_info array
 @param num_records the number of records in exp */
WS_DLL_PUBLIC void
expert_register_field_array(expert_module_t *module, ei_register_info *ei, const int num_records);

/* Checksum-verification result codes used with the expert API. */
#define EXPERT_CHECKSUM_DISABLED    -2
#define EXPERT_CHECKSUM_UNKNOWN     -1
#define EXPERT_CHECKSUM_GOOD        0
#define EXPERT_CHECKSUM_BAD         1

WS_DLL_PUBLIC const value_string expert_group_vals[];

WS_DLL_PUBLIC const value_string expert_severity_vals[];

WS_DLL_PUBLIC const value_string expert_checksum_vals[];

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* __EXPERT_H__ */

/*
 * Editor modelines  -  https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * indent-tabs-mode: t
 * End:
 *
 * vi: set shiftwidth=8 tabstop=8 noexpandtab:
 * :indentSize=8:tabSize=8:noTabs=false:
 */
/**
 * Compute the entropy percentage of the specified array in relation to the
 * maximum block threshold.
 *
 * @param array The array for which the percentage will be computed.
 * @param size The size of the specified array.
 * @return The entropy percentage of the specified array.
 */
static inline const double es_compute_array_entropy_percentage(
    const char *array, const int size)
{
    /* NOTE(review): strlen() stops at the first NUL byte, so this is the
     * length of the NUL-terminated prefix scaled by the threshold, not a
     * Shannon-entropy measure; it also requires `array` to be
     * NUL-terminated even though `size` is passed in — TODO confirm this
     * is the intended definition of "entropy percentage" here. */
    return ((double)(strlen(array) * ES_MAXIMUM_BLOCK_THRESHOLD)) / size;
}
/*
 * This function is called from the entry point for all programs.
 * It performs the C runtime initialization sequence, invokes the
 * user's WinMain, and terminates the process with its return value.
 */
void
WinMainCRTStartup (HINSTANCE hInst, HINSTANCE hPrevInst, LPWSTR lpCmdLine,
		   int nCmdShow)
{
	int nRet;
	_fpreset ();			/* put the FPU into a known default state */
	_pei386_runtime_relocator ();	/* apply runtime pseudo-relocations
					 * (MinGW auto-import fixups) */
	__atexit_init();		/* set up the atexit handler table */
	__gccmain();			/* run GCC static constructors */
	nRet = WinMain(hInst, hPrevInst, lpCmdLine, nCmdShow);
	_cexit ();			/* run atexit handlers before exiting */
	ExitProcess (nRet);		/* does not return */
}
/* * linux/arch/arm/mach-omap1/serial.c * * OMAP1 serial support. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/serial.h> #include <linux/tty.h> #include <linux/serial_8250.h> #include <linux/serial_reg.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/mach-types.h> #include <mach/mux.h> #include "pm.h" static struct clk * uart1_ck; static struct clk * uart2_ck; static struct clk * uart3_ck; static inline unsigned int omap_serial_in(struct plat_serial8250_port *up, int offset) { offset <<= up->regshift; return (unsigned int)__raw_readb(up->membase + offset); } static inline void omap_serial_outp(struct plat_serial8250_port *p, int offset, int value) { offset <<= p->regshift; __raw_writeb(value, p->membase + offset); } /* * Internal UARTs need to be initialized for the 8250 autoconfig to work * properly. Note that the TX watermark initialization may not be needed * once the 8250.c watermark handling code is merged. 
*/ static void __init omap_serial_reset(struct plat_serial8250_port *p) { omap_serial_outp(p, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE); /* disable UART */ omap_serial_outp(p, UART_OMAP_SCR, 0x08); /* TX watermark */ omap_serial_outp(p, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE); /* enable UART */ if (!cpu_is_omap15xx()) { omap_serial_outp(p, UART_OMAP_SYSC, 0x01); while (!(omap_serial_in(p, UART_OMAP_SYSC) & 0x01)); } } static struct plat_serial8250_port serial_platform_data[] = { { .mapbase = OMAP1_UART1_BASE, .irq = INT_UART1, .flags = UPF_BOOT_AUTOCONF, .iotype = UPIO_MEM, .regshift = 2, .uartclk = OMAP16XX_BASE_BAUD * 16, }, { .mapbase = OMAP1_UART2_BASE, .irq = INT_UART2, .flags = UPF_BOOT_AUTOCONF, .iotype = UPIO_MEM, .regshift = 2, .uartclk = OMAP16XX_BASE_BAUD * 16, }, { .mapbase = OMAP1_UART3_BASE, .irq = INT_UART3, .flags = UPF_BOOT_AUTOCONF, .iotype = UPIO_MEM, .regshift = 2, .uartclk = OMAP16XX_BASE_BAUD * 16, }, { }, }; static struct platform_device serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = serial_platform_data, }, }; /* * Note that on Innovator-1510 UART2 pins conflict with USB2. * By default UART2 does not work on Innovator-1510 if you have * USB OHCI enabled. To use UART2, you must disable USB2 first. 
*/ void __init omap_serial_init(void) { int i; if (cpu_is_omap7xx()) { serial_platform_data[0].regshift = 0; serial_platform_data[1].regshift = 0; serial_platform_data[0].irq = INT_7XX_UART_MODEM_1; serial_platform_data[1].irq = INT_7XX_UART_MODEM_IRDA_2; } if (cpu_is_omap15xx()) { serial_platform_data[0].uartclk = OMAP1510_BASE_BAUD * 16; serial_platform_data[1].uartclk = OMAP1510_BASE_BAUD * 16; serial_platform_data[2].uartclk = OMAP1510_BASE_BAUD * 16; } for (i = 0; i < ARRAY_SIZE(serial_platform_data) - 1; i++) { /* Don't look at UARTs higher than 2 for omap7xx */ if (cpu_is_omap7xx() && i > 1) { serial_platform_data[i].membase = NULL; serial_platform_data[i].mapbase = 0; continue; } /* Static mapping, never released */ serial_platform_data[i].membase = ioremap(serial_platform_data[i].mapbase, SZ_2K); if (!serial_platform_data[i].membase) { printk(KERN_ERR "Could not ioremap uart%i\n", i); continue; } switch (i) { case 0: uart1_ck = clk_get(NULL, "uart1_ck"); if (IS_ERR(uart1_ck)) printk("Could not get uart1_ck\n"); else { clk_enable(uart1_ck); if (cpu_is_omap15xx()) clk_set_rate(uart1_ck, 12000000); } break; case 1: uart2_ck = clk_get(NULL, "uart2_ck"); if (IS_ERR(uart2_ck)) printk("Could not get uart2_ck\n"); else { clk_enable(uart2_ck); if (cpu_is_omap15xx()) clk_set_rate(uart2_ck, 12000000); else clk_set_rate(uart2_ck, 48000000); } break; case 2: uart3_ck = clk_get(NULL, "uart3_ck"); if (IS_ERR(uart3_ck)) printk("Could not get uart3_ck\n"); else { clk_enable(uart3_ck); if (cpu_is_omap15xx()) clk_set_rate(uart3_ck, 12000000); } break; } omap_serial_reset(&serial_platform_data[i]); } } #ifdef CONFIG_OMAP_SERIAL_WAKE static irqreturn_t omap_serial_wake_interrupt(int irq, void *dev_id) { /* Need to do something with serial port right after wake-up? */ return IRQ_HANDLED; } /* * Reroutes serial RX lines to GPIO lines for the duration of * sleep to allow waking up the device from serial port even * in deep sleep. 
*/ void omap_serial_wake_trigger(int enable) { if (!cpu_is_omap16xx()) return; if (uart1_ck != NULL) { if (enable) omap_cfg_reg(V14_16XX_GPIO37); else omap_cfg_reg(V14_16XX_UART1_RX); } if (uart2_ck != NULL) { if (enable) omap_cfg_reg(R9_16XX_GPIO18); else omap_cfg_reg(R9_16XX_UART2_RX); } if (uart3_ck != NULL) { if (enable) omap_cfg_reg(L14_16XX_GPIO49); else omap_cfg_reg(L14_16XX_UART3_RX); } } static void __init omap_serial_set_port_wakeup(int gpio_nr) { int ret; ret = gpio_request(gpio_nr, "UART wake"); if (ret < 0) { printk(KERN_ERR "Could not request UART wake GPIO: %i\n", gpio_nr); return; } gpio_direction_input(gpio_nr); ret = request_irq(gpio_to_irq(gpio_nr), &omap_serial_wake_interrupt, IRQF_TRIGGER_RISING, "serial wakeup", NULL); if (ret) { gpio_free(gpio_nr); printk(KERN_ERR "No interrupt for UART wake GPIO: %i\n", gpio_nr); return; } enable_irq_wake(gpio_to_irq(gpio_nr)); } int __init omap_serial_wakeup_init(void) { if (!cpu_is_omap16xx()) return 0; if (uart1_ck != NULL) omap_serial_set_port_wakeup(37); if (uart2_ck != NULL) omap_serial_set_port_wakeup(18); if (uart3_ck != NULL) omap_serial_set_port_wakeup(49); return 0; } #endif /* CONFIG_OMAP_SERIAL_WAKE */ static int __init omap_init(void) { if (!cpu_class_is_omap1()) return -ENODEV; return platform_device_register(&serial_device); } arch_initcall(omap_init);
/* Dummy open
 * Accepts: stream to open
 * Returns: stream on success, NIL on failure
 *
 * A "dummy" mailbox is an empty (zero-length) file or a nonexistent INBOX;
 * any other condition is logged and rejected.
 */

MAILSTREAM *dummy_open (MAILSTREAM *stream)
{
  int fd;
  char err[MAILTMPLEN],tmp[MAILTMPLEN];
  struct stat sbuf;
				/* prototype request? */
  if (!stream) return &dummyproto;
  err[0] = '\0';		/* no error message yet */
				/* can we resolve the mailbox name to a file? */
  if (!dummy_file (tmp,stream->mailbox))
    sprintf (err,"Can't open this name: %.80s",stream->mailbox);
  else if ((fd = open (tmp,O_RDONLY,NIL)) < 0) {
				/* a missing INBOX is not an error */
    if (compare_cstring (stream->mailbox,"INBOX"))
      sprintf (err,"%.80s: %.80s",strerror (errno),stream->mailbox);
  }
  else {
    fstat (fd,&sbuf);		/* check the file's size */
    close (fd);
    if (sbuf.st_size)		/* non-empty file can't be a dummy mailbox */
      sprintf (err,"%.80s (file %.80s) is not in valid mailbox format",
	       stream->mailbox,tmp);
  }
  if (err[0]) {			/* if an error happened */
    mm_log (err,stream->silent ? WARN : ERROR);
    return NIL;
  }
  else if (!stream->silent) {	/* unless silenced, report an empty mailbox */
    mail_exists (stream,0);
    mail_recent (stream,0);
    stream->uid_validity = time (0);
  }
  stream->inbox = T;		/* dummy streams always treat INBOX as present */
  return stream;
}
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_VAD_H_
#define MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_VAD_H_

#include <stddef.h>

#include "modules/audio_coding/codecs/isac/main/source/structs.h"

/* State initializers for the pitch filter, pitch analysis and the
 * pre-filterbank. */
void WebRtcIsac_InitPitchFilter(PitchFiltstr* pitchfiltdata);
void WebRtcIsac_InitPitchAnalysis(PitchAnalysisStruct* state);
void WebRtcIsac_InitPreFilterbank(PreFiltBankstr* prefiltdata);

/* Levinson-Durbin recursion; a: LPC coefficients, k: reflection
 * coefficients, r: autocorrelation sequence, order: model order. */
double WebRtcIsac_LevDurb(double* a, double* k, double* r, size_t order);

/* The number of all-pass filter factors in an upper or lower channel*/
#define NUMBEROFCHANNELAPSECTIONS 2

/* The upper channel all-pass filter factors */
extern const float WebRtcIsac_kUpperApFactorsFloat[2];

/* The lower channel all-pass filter factors */
extern const float WebRtcIsac_kLowerApFactorsFloat[2];

/* In-place all-pass filtering of InOut with the given section factors and
 * persistent FilterState. */
void WebRtcIsac_AllPassFilter2Float(float* InOut,
                                    const float* APSectionFactors,
                                    int lengthInOut,
                                    int NumberOfSections,
                                    float* FilterState);
/* Split `in` into low-pass (LP) and high-pass (HP) bands, plus the
 * look-ahead variants (LP_la/HP_la), using the pre-filterbank state. */
void WebRtcIsac_SplitAndFilterFloat(float* in,
                                    float* LP,
                                    float* HP,
                                    double* LP_la,
                                    double* HP_la,
                                    PreFiltBankstr* prefiltdata);

#endif  // MODULES_AUDIO_CODING_CODECS_ISAC_MAIN_SOURCE_ISAC_VAD_H_
// Calculate cross product between two vectors (used to determine vector normal used to get ray that is perpendicular to an object) vec3_t vec3_cross(vec3_t a, vec3_t b) { vec3_t result = { .x = a.y * b.z - a.z * b.y, .y = a.z * b.x - a.x * b.z, .z = a.x * b.y - a.y * b.x }; return result; }
#include <stdio.h>

/*
 * For each query n, print the smallest value s >= n expressible as a sum
 * of DISTINCT powers of 3.
 *
 * Strategy (same as the original): start from s = 3^0 + ... + 3^(j-1)
 * = (3^j - 1) / 2, the smallest "all powers present" sum >= n, then
 * greedily remove the largest powers whose removal keeps s >= n.
 *
 * Fixes over the original:
 *  - pow() computed powers of 3 in double precision; for larger exponents
 *    the truncation back to int can be off by one.  Exact integer
 *    arithmetic is used instead.
 *  - scanf("%d\n", ...) blocks until the NEXT non-whitespace character
 *    appears, because a whitespace directive in a scanf format consumes
 *    an arbitrary run of whitespace.  "%d" alone already skips leading
 *    whitespace and is what was intended.
 *  - scanf return values are now checked.
 */
int main(void)
{
    int q;
    if (scanf("%d", &q) != 1)
        return 0;
    while (q--) {
        long long n;
        if (scanf("%lld", &n) != 1)
            return 0;

        /* pows[k] = 3^k; s = pows[0] + ... + pows[j-1]; grow until s >= n.
         * 3^38 > 1e18, so 64 slots is ample for long long inputs. */
        long long pows[64];
        long long s = 0;
        int j = 0;
        while (s < n) {
            pows[j] = (j == 0) ? 1 : 3 * pows[j - 1];
            s += pows[j];
            j++;
        }

        /* Dropping 3^(j-1) always leaves s < n (j was chosen minimal),
         * so the greedy scan starts at 3^(j-2), as in the original. */
        for (int p = j - 2; p >= 0; p--)
            if (s - pows[p] >= n)
                s -= pows[p];

        printf("%lld\n", s);
    }
    return 0;
}
/*
 * With pci bus iommu support, we maintain one pool per pcidev and a
 * hashed reverse table for virtual to bus physical address translations.
 *
 * Allocate one DMA-coherent page for pool `mp` and record its
 * virtual->bus mapping in the pool's VTOB hash table.
 * Returns the page's virtual address, or 0 on allocation failure.
 */
static m_addr_t ___dma_getp(m_pool_s *mp)
{
	m_addr_t vp;
	m_vtob_s *vbp;

	/* The reverse-translation entry is allocated first so the page
	 * can be released cleanly if either allocation fails. */
	vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
	if (vbp) {
		dma_addr_t daddr;

		/* One coherent block of PAGE_SIZE << MEMO_PAGE_ORDER bytes;
		 * daddr receives the bus address for the device. */
		vp = (m_addr_t) pci_alloc_consistent(mp->bush,
				PAGE_SIZE<<MEMO_PAGE_ORDER, &daddr);
		if (vp) {
			/* Link the vaddr->baddr pair into the hash bucket. */
			int hc = VTOB_HASH_CODE(vp);
			vbp->vaddr = vp;
			vbp->baddr = daddr;
			vbp->next = mp->vtob[hc];
			mp->vtob[hc] = vbp;
			++mp->nump;	/* count of pages owned by this pool */
			return vp;
		}
	}
	/* DMA allocation failed: release the unused hash entry. */
	if (vbp)
		__m_free(&mp0, vbp, sizeof(*vbp), "VTOB");
	return 0;
}
/* Build a 128-bit vector value from four 32-bit temporaries.
   t3 supplies the most significant 32 bits, t0 the least. */
static IRExpr* mk128from32s ( IRTemp t3, IRTemp t2, IRTemp t1, IRTemp t0 )
{
   IRExpr* hi64 = binop(Iop_32HLto64, mkexpr(t3), mkexpr(t2));
   IRExpr* lo64 = binop(Iop_32HLto64, mkexpr(t1), mkexpr(t0));
   return binop(Iop_64HLtoV128, hi64, lo64);
}
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ravl_interval.c -- ravl_interval implementation */ #include "alloc.h" #include "map.h" #include "ravl_interval.h" #include "pmem2_utils.h" #include "sys_util.h" #include "os_thread.h" #include "ravl.h" /* * ravl_interval - structure representing two points * on the number line */ struct ravl_interval { struct ravl *tree; ravl_interval_min *get_min; ravl_interval_max *get_max; }; /* * ravl_interval_node - structure holding min, max functions and address */ struct ravl_interval_node { void *addr; ravl_interval_min *get_min; ravl_interval_max *get_max; }; /* * ravl_interval_compare -- compare intervals by its boundaries, * no overlapping allowed */ static int ravl_interval_compare(const void *lhs, const void *rhs) { const struct ravl_interval_node *left = lhs; const struct ravl_interval_node *right = rhs; if (left->get_min(left->addr) < right->get_min(right->addr) && left->get_max(left->addr) <= right->get_min(right->addr)) return -1; if (left->get_min(left->addr) > right->get_min(right->addr) && left->get_max(left->addr) >= right->get_min(right->addr)) return 1; return 0; } /* * ravl_interval_delete - finalize the ravl interval module */ void ravl_interval_delete(struct ravl_interval *ri) { ravl_delete(ri->tree); ri->tree = NULL; Free(ri); } /* * ravl_interval_new -- initialize the ravl interval module */ struct ravl_interval * ravl_interval_new(ravl_interval_min *get_min, ravl_interval_max *get_max) { int ret; struct ravl_interval *interval = pmem2_malloc(sizeof(*interval), &ret); if (ret) goto ret_null; interval->tree = ravl_new_sized(ravl_interval_compare, sizeof(struct ravl_interval_node)); if (!(interval->tree)) goto free_alloc; interval->get_min = get_min; interval->get_max = get_max; return interval; free_alloc: Free(interval); ret_null: return NULL; } /* * ravl_interval_insert -- insert interval entry into the tree */ int ravl_interval_insert(struct ravl_interval *ri, void 
*addr) { struct ravl_interval_node rin; rin.addr = addr; rin.get_min = ri->get_min; rin.get_max = ri->get_max; if (ravl_emplace_copy(ri->tree, &rin)) return PMEM2_E_ERRNO; return 0; } /* * ravl_interval_remove -- remove interval entry from the tree */ int ravl_interval_remove(struct ravl_interval *ri, struct ravl_interval_node *rin) { struct ravl_node *node = ravl_find(ri->tree, rin, RAVL_PREDICATE_EQUAL); if (!node) return PMEM2_E_MAPPING_NOT_FOUND; ravl_remove(ri->tree, node); return 0; } /* * ravl_interval_find_prior_or_eq -- find overlapping interval starting prior to * the current one or at the same place */ static struct ravl_interval_node * ravl_interval_find_prior_or_eq(struct ravl *tree, struct ravl_interval_node *rin) { struct ravl_node *node; struct ravl_interval_node *cur; node = ravl_find(tree, rin, RAVL_PREDICATE_LESS_EQUAL); if (!node) return NULL; cur = ravl_data(node); /* * If the end of the found interval is below the searched boundary, then * this is not our interval. */ if (cur->get_max(cur->addr) <= rin->get_min(rin->addr)) return NULL; return cur; } /* * ravl_interval_find_later -- find overlapping interval starting later than * the current one */ static struct ravl_interval_node * ravl_interval_find_later(struct ravl *tree, struct ravl_interval_node *rin) { struct ravl_node *node; struct ravl_interval_node *cur; node = ravl_find(tree, rin, RAVL_PREDICATE_GREATER); if (!node) return NULL; cur = ravl_data(node); /* * If the beginning of the found interval is above the end of * the searched range, then this is not our interval. 
*/ if (cur->get_min(cur->addr) >= rin->get_max(rin->addr)) return NULL; return cur; } /* * ravl_interval_find_equal -- find the interval with exact (min, max) range */ struct ravl_interval_node * ravl_interval_find_equal(struct ravl_interval *ri, void *addr) { struct ravl_interval_node range; range.addr = addr; range.get_min = ri->get_min; range.get_max = ri->get_max; struct ravl_node *node; node = ravl_find(ri->tree, &range, RAVL_PREDICATE_EQUAL); if (!node) return NULL; return ravl_data(node); } /* * ravl_interval_find -- find the earliest interval within (min, max) range */ struct ravl_interval_node * ravl_interval_find(struct ravl_interval *ri, void *addr) { struct ravl_interval_node range; range.addr = addr; range.get_min = ri->get_min; range.get_max = ri->get_max; struct ravl_interval_node *cur; cur = ravl_interval_find_prior_or_eq(ri->tree, &range); if (!cur) cur = ravl_interval_find_later(ri->tree, &range); return cur; } /* * ravl_interval_data -- returns the data contained within interval node */ void * ravl_interval_data(struct ravl_interval_node *rin) { return (void *)rin->addr; }
/**
 * hostapd_cleanup - Per-BSS cleanup (deinitialization)
 * @hapd: Pointer to BSS data
 *
 * This function is used to free all per-BSS data structures and resources.
 * This gets called in a loop for each BSS between calls to
 * hostapd_cleanup_iface_pre() and hostapd_cleanup_iface() when an interface
 * is deinitialized. Most of the modules that are initialized in
 * hostapd_setup_bss() are deinitialized here.
 */
static void hostapd_cleanup(struct hostapd_data *hapd)
{
	/* Shut down the per-BSS control interface before freeing the
	 * state it operates on. */
	if (hapd->iface->ctrl_iface_deinit)
		hapd->iface->ctrl_iface_deinit(hapd);

	iapp_deinit(hapd->iapp);
	hapd->iapp = NULL;
	accounting_deinit(hapd);
	hostapd_deinit_wpa(hapd);
	vlan_deinit(hapd);
	hostapd_acl_deinit(hapd);
#ifndef CONFIG_NO_RADIUS
	radius_client_deinit(hapd->radius);
	hapd->radius = NULL;
#endif

	hostapd_deinit_wps(hapd);

	authsrv_deinit(hapd);

	/* Remove the BSS netdev only if this process created it
	 * (interface_added); log but continue on failure. */
	if (hapd->interface_added &&
	    hostapd_if_remove(hapd, WPA_IF_AP_BSS, hapd->conf->iface)) {
		wpa_printf(MSG_WARNING, "Failed to remove BSS interface %s",
			   hapd->conf->iface);
	}

	os_free(hapd->probereq_cb);
	hapd->probereq_cb = NULL;
}
/* ===-- ashlti3.c - Implement __ashlti3 -----------------------------------===
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __ashlti3 for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

#include "int_lib.h"

#ifdef CRT_HAS_128BIT

/* Returns: a << b */

/* Precondition: 0 <= b < bits_in_tword */

COMPILER_RT_ABI ti_int __ashlti3(ti_int a, si_int b) {
    const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
    twords in;
    twords out;

    in.all = a;
    if (b & bits_in_dword) {
        /* bits_in_dword <= b < bits_in_tword: everything shifts out of
         * the low dword; the high dword receives the remainder. */
        out.s.high = in.s.low << (b - bits_in_dword);
        out.s.low = 0;
    } else {
        /* 0 <= b < bits_in_dword.  The b == 0 case must bail out early:
         * the carry expression below would shift right by a full dword
         * width, which is undefined behavior. */
        if (b == 0)
            return a;
        out.s.high = (in.s.high << b) | (in.s.low >> (bits_in_dword - b));
        out.s.low = in.s.low << b;
    }
    return out.all;
}

#endif /* CRT_HAS_128BIT */
// Make a smaller rect by trimming the edges of a larger one. // static GRect bsky_rect_trim (const GRect rect, const int8_t trim) { const GRect result = { .origin = { .x = rect.origin.x + trim, .y = rect.origin.y + trim, }, .size = { .w = rect.size.w - trim*2, .h = rect.size.h - trim*2, }, }; return result; }
/*------ ROUTINE: tsqr_BBKS -----
INPUT: ccl_parameters and k wavenumber in Mpc^-1
TASK: provide the square of the BBKS transfer function with baryonic correction
NOTE: Bardeen et al. (1986) as implemented in Sugiyama (1995)
*/
static double tsqr_BBKS(ccl_parameters* params, double k)
{
  /* Shape parameter q = k * (T_CMB/2.7)^2 / (Omega_m h^2 * baryon factor) */
  double tfac = params->T_CMB / 2.7;
  double baryon_fac = exp(-params->Omega_b *
                          (1.0 + pow(2. * params->h, .5) / params->Omega_m));
  double q = tfac * tfac * k /
             (params->Omega_m * params->h * params->h * baryon_fac);

  /* BBKS fitting formula, squared. */
  double lead = log(1. + 2.34 * q) / (2.34 * q);
  double poly = 1. + 3.89 * q + pow(16.1 * q, 2.0) + pow(5.46 * q, 3.0) +
                pow(6.71 * q, 4.0);
  return pow(lead, 2.0) / pow(poly, 0.5);
}
/**
 * Put the ADXL362 into a lower power standby state without gating power
 * @Note Accel need to stay
 * @todo Implement this function
 *
 * Sequence: sleep, send one SPI command (ADXL_RESET -- presumably the
 * device soft-reset sequence, TODO confirm against the datasheet), sleep
 * again to let the part settle, then send ADXL_CONFIG_STBY (presumably
 * the standby configuration writes -- confirm).  Responses are captured
 * in the shared gpRxBuf but not inspected.
 */
void ACCEL_standby()
{
    /* NOTE(review): purpose of the initial 5 ms sleep is not evident from
     * this code alone -- likely settling time; confirm. */
    lowPowerSleep(LPM_5ms);
    SPI_transaction(gpRxBuf, (uint8_t*)&ADXL_RESET, sizeof(ADXL_RESET),ACCEL_SPI);
    lowPowerSleep(LPM_1ms);
    SPI_transaction(gpRxBuf, (uint8_t*)&ADXL_CONFIG_STBY, sizeof(ADXL_CONFIG_STBY),ACCEL_SPI);
}
//! ---------------------------------------------------------------------------- //! \brief Decrypt and internally validate a provided token //! \return: 0 on success //! -1 on failure to decrypt the token //! -2 on failure to validate the cryptographic tag //! -3 when provided with invalid arguments //! \param: ao_plaintext The output argument that is populated with the plaintext value. Should be a_token_len bytes. //! \param: ao_plaintext_len This argument should initially point to an integer specifying the size of the buffer at ao_plaintext. //! On function exit the integer will be set to the length of the decrypted token //! \param: a_token A pointer to the ciphertext //! \param: a_token_len The length of the provided ciphertext. 1 <= a_token_len <= 6144 //! \param: a_key A pointer to the key to use to decrypt. Should be securely generated and at least 32 bytes long. //! \param: a_key_len The length of the provided key. 1 <= a_key_len <= 4096 //! ---------------------------------------------------------------------------- int ectoken_decrypt_token(char* ao_plaintext, int* ao_plaintext_len, const char* a_token, const int a_token_len, const char* a_key, const int a_key_len) { if(!ao_plaintext || !ao_plaintext_len || !a_token || !a_token_len || !a_key || !a_key_len) return -3; if(a_token_len > 6144 || a_token_len < 0 || a_key_len > 4096 || a_key_len < 0) return -3; if(ectoken_encrypt_required_size(0) > a_token_len) return -3; if(ectoken_decrypt_required_size(a_token_len) > *ao_plaintext_len) return -3; memset(ao_plaintext, 0, *ao_plaintext_len); int l_ret = 0; int l_ciphertext_len = a_token_len; unsigned char l_ciphertext[l_ciphertext_len]; unsigned char l_iv[G_IV_LEN]; unsigned char l_tag[G_TAG_LEN]; unsigned char l_key[G_KEY_LEN]; int l_token_len = a_token_len; unsigned char l_token[l_token_len]; memset(l_token, 0, l_token_len); l_ret = sha256(l_key, a_key, a_key_len); if (l_ret < 32) { l_ret = -3; goto cleanup; } l_ciphertext_len = 
deconstruct_base64_encoded_token(l_ciphertext, l_iv, G_IV_LEN, l_tag, G_TAG_LEN, (unsigned char*) a_token, a_token_len); if (l_ciphertext_len <= 0) { return -1; } l_ret = ec_decrypt(l_token, &l_token_len, l_ciphertext, l_ciphertext_len, (unsigned char*)l_key, l_iv, G_IV_LEN, l_tag, G_TAG_LEN); switch (l_ret) { case 0: break; case -7: l_ret = -2; goto cleanup; break; case -1: case -2: case -3: case -4: case -5: case -6: default: l_ret = -1; goto cleanup; break; } if (l_token_len > *ao_plaintext_len) { l_ret = -3; goto cleanup; } memcpy(ao_plaintext, l_token, l_token_len); ao_plaintext[l_token_len] = 0x0; *ao_plaintext_len = l_token_len; cleanup: OPENSSL_cleanse(l_key, G_KEY_LEN); OPENSSL_cleanse(l_iv, G_IV_LEN); OPENSSL_cleanse(l_tag, G_TAG_LEN); OPENSSL_cleanse(l_ciphertext, l_ciphertext_len); OPENSSL_cleanse(l_token, l_token_len); return l_ret; }
/// Add an entry to the end of the list of errors. /// /// @param qi quickfix list /// @param prevp nonnull pointer (to previously added entry or NULL) /// @param dir optional directory name /// @param fname file name or NULL /// @param bufnum buffer number or zero /// @param mesg message /// @param lnum line number /// @param col column /// @param vis_col using visual column /// @param pattern search pattern /// @param nr error number /// @param type type character /// @param valid valid entry /// /// @returns OK or FAIL. static int qf_add_entry(qf_info_T *qi, qfline_T **prevp, char_u *dir, char_u *fname, int bufnum, char_u *mesg, long lnum, int col, char_u vis_col, char_u *pattern, int nr, char_u type, char_u valid) { qfline_T *qfp = xmalloc(sizeof(qfline_T)); if (bufnum != 0) qfp->qf_fnum = bufnum; else qfp->qf_fnum = qf_get_fnum(dir, fname); qfp->qf_text = vim_strsave(mesg); qfp->qf_lnum = lnum; qfp->qf_col = col; qfp->qf_viscol = vis_col; if (pattern == NULL || *pattern == NUL) { qfp->qf_pattern = NULL; } else { qfp->qf_pattern = vim_strsave(pattern); } qfp->qf_nr = nr; if (type != 1 && !vim_isprintc(type)) type = 0; qfp->qf_type = type; qfp->qf_valid = valid; if (qi->qf_lists[qi->qf_curlist].qf_count == 0) { qi->qf_lists[qi->qf_curlist].qf_start = qfp; qfp->qf_prev = qfp; } else { assert(*prevp); qfp->qf_prev = *prevp; (*prevp)->qf_next = qfp; } qfp->qf_next = qfp; qfp->qf_cleared = FALSE; *prevp = qfp; ++qi->qf_lists[qi->qf_curlist].qf_count; if (qi->qf_lists[qi->qf_curlist].qf_index == 0 && qfp->qf_valid) { qi->qf_lists[qi->qf_curlist].qf_index = qi->qf_lists[qi->qf_curlist].qf_count; qi->qf_lists[qi->qf_curlist].qf_ptr = qfp; } return OK; }
/*
 * hub int-in complete function
 *
 * Completion handler for the hub's interrupt-in status URB.  On success
 * the hub is queued on the global event list for the hub event handler;
 * transient errors are tolerated up to a threshold.
 */
static void hub_irq(URB_T *urb)
{
	USB_HUB_T *hub = (USB_HUB_T *)urb->context;

	if (urb->status) {
		if (urb->status == USB_ERR_NOENT)
			return;	/* URB was unlinked; nothing to do */

		USB_warning("nonzero status in irq %d\n", urb->status);
		/* Tolerate up to 9 consecutive errors.  On the 10th,
		 * latch the status in hub->error and fall through so an
		 * event is queued; once latched, further errors return
		 * here immediately. */
		if ((++hub->nerrors < 10) || hub->error)
			return;
		hub->error = urb->status;
	}

	hub->nerrors = 0;	/* reset the consecutive-error counter */

	/* Queue this hub for processing unless it is already queued. */
	if (list_empty(&hub->event_list)) {
		list_add(&hub->event_list, &_HubEventList);
	}
}
/*
 * Begin compressing data on `desc'
 *
 * Allocates a zlib deflate stream plus an output buffer, negotiates the
 * requested MCCP telnet option with the client, and attaches the stream
 * to the socket.  Returns true on success (or if compression is already
 * active), false on any failure; on failure the socket is left in its
 * original uncompressed state.
 *
 * Fixes over the original:
 *  - both malloc() results are now checked (previously a NULL return
 *    would have been dereferenced),
 *  - failure paths no longer leave dsock->out_compress_buf dangling,
 *  - the bad-teleopt path now calls deflateEnd() so zlib's internal
 *    state (allocated by deflateInit) is not leaked.
 */
bool compressStart(D_SOCKET *dsock, unsigned char teleopt)
{
  z_stream *s;

  /* already compressing */
  if (dsock->out_compress)
    return true;

  s = (z_stream *) malloc(sizeof(*s));
  if (!s)
    return false;

  dsock->out_compress_buf = (unsigned char *) malloc(COMPRESS_BUF_SIZE);
  if (!dsock->out_compress_buf)
  {
    free(s);
    return false;
  }

  s->next_in    = NULL;
  s->avail_in   = 0;
  s->next_out   = dsock->out_compress_buf;
  s->avail_out  = COMPRESS_BUF_SIZE;
  s->zalloc     = zlib_alloc;
  s->zfree      = zlib_free;
  s->opaque     = NULL;

  if (deflateInit(s, 9) != Z_OK)
  {
    /* problems with zlib, try to clean up */
    free(dsock->out_compress_buf);
    dsock->out_compress_buf = NULL;
    free(s);
    return false;
  }

  if (teleopt == TELOPT_COMPRESS)
    text_to_socket(dsock, (char *) enable_compress);
  else if (teleopt == TELOPT_COMPRESS2)
    text_to_socket(dsock, (char *) enable_compress2);
  else
  {
    bug("Bad teleoption %d passed", teleopt);
    deflateEnd(s);   /* release zlib's internal deflate state */
    free(dsock->out_compress_buf);
    dsock->out_compress_buf = NULL;
    free(s);
    return false;
  }

  /* now we're compressing */
  dsock->compressing = teleopt;
  dsock->out_compress = s;
  return true;
}
/***********************************************************************
 *           MENU_DoNextMenu
 *
 * NOTE: WM_NEXTMENU documented in Win32 is a bit different.
 *
 * Handles moving off either end of the top-level menu bar with the
 * arrow keys: asks the owner window (via WM_NEXTMENU) for the menu to
 * switch to, falling back to toggling between the window's system menu
 * and its menu bar.  Returns TRUE if tracking switched to a new menu.
 */
static LRESULT MENU_DoNextMenu( MTRACKER* pmt, UINT vk )
{
    POPUPMENU *menu = MENU_GetMenu( pmt->hTopMenu );

    /* Only act when the selection is about to run off an end of the bar. */
    if( (vk == VK_LEFT && menu->FocusedItem == 0 ) ||
        (vk == VK_RIGHT && menu->FocusedItem == menu->nItems - 1))
    {
        MDINEXTMENU next_menu;
        HMENU hNewMenu;
        HWND hNewWnd;
        UINT id = 0;

        next_menu.hmenuIn = (IS_SYSTEM_MENU(menu)) ? GetSubMenu(pmt->hTopMenu,0) : pmt->hTopMenu;
        next_menu.hmenuNext = 0;
        next_menu.hwndNext = 0;
        /* Let the owner (e.g. an MDI frame) choose the next menu/window. */
        SendMessageW( pmt->hOwnerWnd, WM_NEXTMENU, vk, (LPARAM)&next_menu );

        TRACE("%p [%p] -> %p [%p]\n",
              pmt->hCurrentMenu, pmt->hOwnerWnd, next_menu.hmenuNext, next_menu.hwndNext );

        if (!next_menu.hmenuNext || !next_menu.hwndNext)
        {
            /* Owner gave no answer: toggle system menu <-> menu bar on
             * the current owner window. */
            DWORD style = GetWindowLongW( pmt->hOwnerWnd, GWL_STYLE );
            hNewWnd = pmt->hOwnerWnd;
            if( IS_SYSTEM_MENU(menu) )
            {
                /* leaving the system menu: go to the menu bar (child
                 * windows have no menu bar -> bail out) */
                if(style & WS_CHILD || !(hNewMenu = GetMenu(hNewWnd))) return FALSE;

                if( vk == VK_LEFT )
                {
                    /* moving left wraps to the bar's last item */
                    menu = MENU_GetMenu( hNewMenu );
                    id = menu->nItems - 1;
                }
            }
            else if (style & WS_SYSMENU )
            {
                /* leaving the menu bar: go to the system menu */
                hNewMenu = get_win_sys_menu( hNewWnd );
            }
            else return FALSE;
        }
        else    /* the owner returned a menu/window pair to switch to */
        {
            hNewMenu = next_menu.hmenuNext;
            hNewWnd = WIN_GetFullHandle( next_menu.hwndNext );

            if( IsMenu(hNewMenu) && IsWindow(hNewWnd) )
            {
                DWORD style = GetWindowLongW( hNewWnd, GWL_STYLE );

                if (style & WS_SYSMENU &&
                    GetSubMenu(get_win_sys_menu(hNewWnd), 0) == hNewMenu )
                {
                    /* the owner handed back the system menu's popup;
                     * use the real system menu handle instead */
                    hNewMenu = get_win_sys_menu(hNewWnd);
                }
                else if (style & WS_CHILD || GetMenu(hNewWnd) != hNewMenu )
                {
                    /* returned menu does not belong to the returned
                     * window -- refuse to switch */
                    TRACE(" -- got confused.\n");
                    return FALSE;
                }
            }
            else return FALSE;
        }

        /* Deselect and collapse the menu we are leaving. */
        if( hNewMenu != pmt->hTopMenu )
        {
            MENU_SelectItem( pmt->hOwnerWnd, pmt->hTopMenu, NO_SELECTED_ITEM,
                    FALSE, 0 );
            if( pmt->hCurrentMenu != pmt->hTopMenu )
                MENU_HideSubPopups( pmt->hOwnerWnd, pmt->hTopMenu, FALSE );
        }

        /* Transfer mouse capture if the owner window changed. */
        if( hNewWnd != pmt->hOwnerWnd )
        {
            pmt->hOwnerWnd = hNewWnd;
            MENU_SetCapture( pmt->hOwnerWnd );
        }

        pmt->hTopMenu = pmt->hCurrentMenu = hNewMenu; /* all subpopups are hidden */
        MENU_SelectItem( pmt->hOwnerWnd, pmt->hTopMenu, id, TRUE, 0 );

        return TRUE;
    }
    return FALSE;
}
/**
 * Dummy function for setting the colour palette, which we actually never
 * touch.  However, the server still requires us to provide this.
 */
static void vboxLoadPalette(ScrnInfoPtr pScrn, int numColors,
                            int *indices, LOCO *colors, VisualPtr pVisual)
{
    /* Intentionally a no-op; the casts only silence unused-parameter
     * warnings. */
    (void)pScrn;
    (void)numColors;
    (void)indices;
    (void)colors;
    (void)pVisual;
}
/*
 * Initialize an existing vm_map structure such as that in the vmspace
 * structure. The pmap is initialized elsewhere.
 *
 * No requirements.
 *
 * Resets every field of the map to a pristine empty state covering the
 * address range [min_addr, max_addr) and (re)creates its locks/tokens.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min_addr, vm_offset_t max_addr,
	    pmap_t pmap)
{
	RB_INIT(&map->rb_root);			/* empty entry tree */
	spin_init(&map->ilock_spin, "ilock");
	map->ilock_base = NULL;
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	vm_map_min(map) = min_addr;		/* macro lvalues: address range */
	vm_map_max(map) = max_addr;
	map->pmap = pmap;
	map->timestamp = 0;
	map->flags = 0;
	bzero(&map->freehint, sizeof(map->freehint));
	lwkt_token_init(&map->token, "vm_map");
	lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
}
/* Calculate stack offsets.  These are used to calculate register elimination
   offsets and in prologue/epilogue code.  Also calculates which registers
   should be saved.  */

static arm_stack_offsets *
arm_get_frame_offsets (void)
{
  struct arm_stack_offsets *offsets;
  unsigned long func_type;
  int leaf;
  int saved;
  int core_saved;
  HOST_WIDE_INT frame_size;
  int i;

  offsets = &cfun->machine->stack_offsets;

  /* After reload the layout is final; return the cached offsets as-is.  */
  if (reload_completed)
    return offsets;

  /* Size of the local variable area, rounded to a word boundary.  */
  frame_size = ROUND_UP_WORD (get_frame_size ());

  leaf = leaf_function_p ();

  /* Space for pushed anonymous (variadic) arguments.  */
  offsets->saved_args = crtl->args.pretend_args_size;

  offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0) +
                   arm_compute_static_chain_stack_bytes();

  if (TARGET_32BIT)
    {
      unsigned int regno;

      offsets->saved_regs_mask = arm_compute_save_reg_mask ();
      core_saved = bit_count (offsets->saved_regs_mask) * 4;
      saved = core_saved;

      if (TARGET_REALLY_IWMMXT)
	{
	  /* Each call-saved iWMMXt register takes 8 bytes.  */
	  for (regno = FIRST_IWMMXT_REGNUM;
	       regno <= LAST_IWMMXT_REGNUM;
	       regno++)
	    if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
	      saved += 8;
	}

      func_type = arm_current_func_type ();
      if (! IS_VOLATILE (func_type))
	{
	  /* Each saved FPA register takes 12 bytes.  */
	  for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
	    if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
	      saved += 12;

	  /* Space for the saved VFP registers.  */
	  if (TARGET_HARD_FLOAT && TARGET_VFP)
	    saved += arm_get_vfp_saved_size ();
	}
    }
  else /* Thumb-1 */
    {
      offsets->saved_regs_mask = thumb1_compute_save_reg_mask ();
      core_saved = bit_count (offsets->saved_regs_mask) * 4;
      saved = core_saved;
      if (TARGET_BACKTRACE)
	saved += 16;
    }

  offsets->saved_regs = offsets->saved_args + saved +
                        arm_compute_static_chain_stack_bytes();
  offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;

  /* A leaf function with no locals and no alloca needs no further
     layout work (and no alignment padding).  */
  if (leaf && frame_size == 0 && ! cfun->calls_alloca)
    {
      offsets->outgoing_args = offsets->soft_frame;
      offsets->locals_base = offsets->soft_frame;
      return offsets;
    }

  /* Keep the soft frame pointer doubleword-aligned.  */
  if (ARM_DOUBLEWORD_ALIGN && (offsets->soft_frame & 7))
    {
      offsets->soft_frame += 4;
      /* When there is no local/outgoing-args area, try to realize the
	 4-byte padding by saving one extra core register instead.  */
      if (frame_size + crtl->outgoing_args_size == 0)
	{
	  int reg = -1;

	  /* Prefer r3 when it is free and not needed for the return
	     value or a sibling call.  */
	  if (! any_sibcall_uses_r3 ()
	      && arm_size_return_regs () <= 12
	      && (offsets->saved_regs_mask & (1 << 3)) == 0)
	    {
	      reg = 3;
	    }
	  else
	    /* Otherwise take the first unsaved register from r4 up.  */
	    for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++)
	      {
		if ((offsets->saved_regs_mask & (1 << i)) == 0)
		  {
		    reg = i;
		    break;
		  }
	      }

	  if (reg != -1)
	    {
	      offsets->saved_regs += 4;
	      offsets->saved_regs_mask |= (1 << reg);
	    }
	}
    }

  offsets->locals_base = offsets->soft_frame + frame_size;
  offsets->outgoing_args = (offsets->locals_base
			    + crtl->outgoing_args_size);

  if (ARM_DOUBLEWORD_ALIGN)
    {
      /* Keep the stack pointer doubleword-aligned at call boundaries.  */
      if (offsets->outgoing_args & 7)
	offsets->outgoing_args += 4;
      gcc_assert (!(offsets->outgoing_args & 7));
    }

  return offsets;
}
/**
  * @brief  Calculate and display the humidity value on the LCD
  * @param  DisplayHumidity : Relative Humidity in percent
  * @retval None
  *
  * Also maintains the running RHmin/RHmax extremes and shows them on a
  * second LCD line.
  *
  * Fix: DisplayHumidity is uint32_t, so the old "%d" conversion was a
  * format/argument mismatch (undefined behavior); it is now printed with
  * a matching "%lu" after an explicit widening cast.
  */
static void Display_Humidity(uint32_t DisplayHumidity)
{
  uint8_t LCDstr[20] = {0};

  /* Current humidity reading */
  sprintf((char*)LCDstr, "      %lu %%      ", (unsigned long)DisplayHumidity);
  LCD_DisplayStringLine(LINE(4), (uint8_t *) LCDstr);

  /* NOTE(review): the extremes are tracked from the global
   * RelativeHumidity rather than the DisplayHumidity parameter --
   * presumably the caller passes the same value; confirm. */
  if (RelativeHumidity < RHmin)
  {
    RHmin = (uint8_t) RelativeHumidity;
  }
  if (RelativeHumidity > RHmax)
  {
    RHmax = (uint8_t) RelativeHumidity;
  }
  /* RHmin/RHmax are uint8_t and promote to int, so "%d" matches here. */
  sprintf((char*)LCDstr, "Min= %d%%  Max= %d%%  ", RHmin,RHmax);
  LCD_DisplayStringLine(LINE(9), (uint8_t *) LCDstr);
}
/** \brief wakeup the CryptoAuth device
 * \param[in] device    Device context pointer
 * \return ATCA_SUCCESS on success, otherwise an error code.
 */
ATCA_STATUS calib_wakeup(ATCADevice device)
{
    if (NULL == device)
    {
        return ATCA_TRACE(ATCA_BAD_PARAM, "NULL pointer received");
    }

    /* Delegate the actual wake sequence to the interface layer. */
    return atwake(device->mIface);
}
/*++ Copyright (c) Microsoft Corporation Licensed under the MIT license. Module Name: - conddkrefs.h Abstract: - Contains headers that are a part of the public DDK. - We don't include both the DDK and the SDK at the same time because they mesh poorly and it's easier to include a copy of the infrequently changing defs here. --*/ #pragma once #ifndef _DDK_INCLUDED #include <winternl.h> extern "C" { #pragma region wdm.h(public DDK) // UNICODE_STRING extern "C++" { char _RTL_CONSTANT_STRING_type_check(const char* s); char _RTL_CONSTANT_STRING_type_check(const WCHAR* s); template<size_t N> class _RTL_CONSTANT_STRING_remove_const_template_class; template<> class _RTL_CONSTANT_STRING_remove_const_template_class<sizeof(char)> { public: typedef char T; }; template<> class _RTL_CONSTANT_STRING_remove_const_template_class<sizeof(WCHAR)> { public: typedef WCHAR T; }; #define _RTL_CONSTANT_STRING_remove_const_macro(s) \ (const_cast<_RTL_CONSTANT_STRING_remove_const_template_class<sizeof((s)[0])>::T*>(s)) #define RTL_CONSTANT_STRING(s) \ { \ sizeof(s) - sizeof((s)[0]), \ sizeof(s) / sizeof(_RTL_CONSTANT_STRING_type_check(s)), \ _RTL_CONSTANT_STRING_remove_const_macro(s) \ } } // // Define the file system information class values // // WARNING: The order of the following values are assumed by the I/O system. // Any changes made here should be reflected there as well. 
// clang-format off typedef enum _FSINFOCLASS { FileFsVolumeInformation = 1, FileFsLabelInformation, // 2 FileFsSizeInformation, // 3 FileFsDeviceInformation, // 4 FileFsAttributeInformation, // 5 FileFsControlInformation, // 6 FileFsFullSizeInformation, // 7 FileFsObjectIdInformation, // 8 FileFsDriverPathInformation, // 9 FileFsVolumeFlagsInformation, // 10 FileFsSectorSizeInformation, // 11 FileFsDataCopyInformation, // 12 FileFsMetadataSizeInformation, // 13 FileFsMaximumInformation } FS_INFORMATION_CLASS, *PFS_INFORMATION_CLASS; // clang-format on #ifndef DEVICE_TYPE #define DEVICE_TYPE DWORD #endif typedef struct _FILE_FS_DEVICE_INFORMATION { DEVICE_TYPE DeviceType; ULONG Characteristics; } FILE_FS_DEVICE_INFORMATION, *PFILE_FS_DEVICE_INFORMATION; #pragma region IOCTL codes // // Define the various device type values. Note that values used by Microsoft // Corporation are in the range 0-32767, and 32768-65535 are reserved for use // by customers. // #ifndef FILE_DEVICE_CONSOLE #define FILE_DEVICE_CONSOLE 0x00000050 #endif // // Macro definition for defining IOCTL and FSCTL function control codes. Note // that function codes 0-2047 are reserved for Microsoft Corporation, and // 2048-4095 are reserved for customers. 
// #ifndef CTL_CODE #define CTL_CODE(DeviceType, Function, Method, Access) ( \ ((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method)) #endif // // Define the method codes for how buffers are passed for I/O and FS controls // #define METHOD_BUFFERED 0 #define METHOD_IN_DIRECT 1 #define METHOD_OUT_DIRECT 2 #ifndef METHOD_NEITHER #define METHOD_NEITHER 3 #endif // // Define some easier to comprehend aliases: // METHOD_DIRECT_TO_HARDWARE (writes, aka METHOD_IN_DIRECT) // METHOD_DIRECT_FROM_HARDWARE (reads, aka METHOD_OUT_DIRECT) // #define METHOD_DIRECT_TO_HARDWARE METHOD_IN_DIRECT #define METHOD_DIRECT_FROM_HARDWARE METHOD_OUT_DIRECT #pragma endregion #pragma endregion #pragma region ntifs.h(public DDK) #define RtlOffsetToPointer(B, O) ((PCHAR)(((PCHAR)(B)) + ((ULONG_PTR)(O)))) __kernel_entry NTSYSCALLAPI NTSTATUS NTAPI NtQueryVolumeInformationFile( _In_ HANDLE FileHandle, _Out_ PIO_STATUS_BLOCK IoStatusBlock, _Out_writes_bytes_(Length) PVOID FsInformation, _In_ ULONG Length, _In_ FS_INFORMATION_CLASS FsInformationClass); #pragma endregion // InteractivityOneCore depends on this private function. The IsPresent checks // are automatically generated by forwarder.template and aren't part of the DDK. // I've placed it here because I couldn't come up with a better place. BOOL IsGetSystemMetricsPresent(); } #endif // _DDK_INCLUDED
/** write a monochrome pixel to the colour LCD **/
static fb_data pixel_to_lcd_gray(void)
{
    /* Use the clamped green channel as the grey level, then expand it
       to the bit depth of each LCD colour component (no dithering). */
    int luma = clamp_component(pixel->g);
    int r = component_to_lcd(luma, LCD_RED_BITS, NODITHER_DELTA);
    int b = component_to_lcd(luma, LCD_BLUE_BITS, NODITHER_DELTA);
    int g = component_to_lcd(luma, LCD_GREEN_BITS, NODITHER_DELTA);
    return FB_RGBPACK_LCD(r, g, b);
}
/*
 * Error context callback for errors occurring during shared buffer writes.
 *
 * Adds "writing block N of relation X" context to whatever error is
 * being reported.
 */
static void
shared_buffer_write_error_callback(void *arg)
{
	volatile BufferDesc *bufHdr = (volatile BufferDesc *) arg;

	if (bufHdr != NULL)
	{
		char	   *path = relpathbackend(bufHdr->tag.rnode, -1,
											  bufHdr->tag.forkNum);

		errcontext("writing block %u of relation %s",
				   bufHdr->tag.blockNum, path);
		pfree(path);
	}
}
/* Check whether the residue is water: its first atom must be an oxygen
   bonded to exactly two hydrogens.  Returns 1 for water, 0 otherwise. */
int is_water(topo_mol_residue_t *res) {
  topo_mol_atom_t *oxy = res->atomArray[0];
  topo_mol_bond_t *bond;
  int hydrogens = 0;

  if (!is_oxygen(oxy))
    return 0;

  /* Count hydrogen neighbors across all bonds of the oxygen. */
  for (bond = oxy->bonds; bond; bond = topo_mol_bond_next(bond, oxy)) {
    topo_mol_atom_t *other =
        (bond->atom[0] == oxy) ? bond->atom[1] : bond->atom[0];
    if (is_hydrogen(other))
      ++hydrogens;
  }

  return hydrogens == 2;
}
/*
 * Configuation settings for the Renesas R7780MP board
 *
 * Copyright (C) 2007,2008 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
 * Copyright (C) 2008 Yusuke Goda <goda.yusuke@renesas.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#ifndef __R7780RP_H
#define __R7780RP_H

#undef DEBUG
#define CONFIG_CPU_SH7780	1
#define CONFIG_R7780MP		1
#define CONFIG_SYS_R7780MP_OLD_FLASH	1	/* select the older NOR flash part */
#define __LITTLE_ENDIAN__	1

/*
 * Command line configuration.
 */
#define CONFIG_CMD_SDRAM
#define CONFIG_CMD_FLASH
#define CONFIG_CMD_MEMORY
#define CONFIG_CMD_PCI
#define CONFIG_CMD_NET
#define CONFIG_CMD_PING
#define CONFIG_CMD_SAVEENV
#define CONFIG_CMD_NFS
#define CONFIG_CMD_IDE
#define CONFIG_CMD_EXT2
#define CONFIG_DOS_PARTITION

/* Serial console on SCIF0 */
#define CONFIG_SCIF_CONSOLE	1
#define CONFIG_BAUDRATE		115200
#define CONFIG_CONS_SCIF0	1

#define CONFIG_BOOTDELAY	3
#define CONFIG_BOOTARGS		"console=ttySC0,115200"
#define CONFIG_ENV_OVERWRITE	1

/* check for keypress on bootdelay==0 */
/*#define CONFIG_ZERO_BOOTDELAY_CHECK*/

#define CONFIG_SYS_TEXT_BASE	0x0FFC0000
#define CONFIG_SYS_SDRAM_BASE	(0x08000000)
#define CONFIG_SYS_SDRAM_SIZE	(128 * 1024 * 1024)

#define CONFIG_SYS_LONGHELP
#define CONFIG_SYS_CBSIZE	256	/* console buffer size */
#define CONFIG_SYS_PBSIZE	256	/* print buffer size */
#define CONFIG_SYS_MAXARGS	16
#define CONFIG_SYS_BARGSIZE	512
/* Memory test covers SDRAM up to (but not including) the u-boot image. */
#define CONFIG_SYS_MEMTEST_START	(CONFIG_SYS_SDRAM_BASE)
#define CONFIG_SYS_MEMTEST_END		(CONFIG_SYS_TEXT_BASE - 0x100000)

/* Flash board support */
#define CONFIG_SYS_FLASH_BASE	(0xA0000000)
#ifdef CONFIG_SYS_R7780MP_OLD_FLASH
/* NOR Flash (S29PL127J60TFI130) */
# define CONFIG_SYS_FLASH_CFI_WIDTH	FLASH_CFI_32BIT
# define CONFIG_SYS_MAX_FLASH_BANKS	(2)
# define CONFIG_SYS_MAX_FLASH_SECT	270
# define CONFIG_SYS_FLASH_BANKS_LIST	{ CONFIG_SYS_FLASH_BASE,\
				CONFIG_SYS_FLASH_BASE + 0x100000,\
				CONFIG_SYS_FLASH_BASE + 0x400000,\
				CONFIG_SYS_FLASH_BASE + 0x700000, }
#else /* CONFIG_SYS_R7780MP_OLD_FLASH */
/* NOR Flash (Spantion S29GL256P) */
# define CONFIG_SYS_MAX_FLASH_BANKS	(1)
# define CONFIG_SYS_MAX_FLASH_SECT	256
# define CONFIG_SYS_FLASH_BANKS_LIST	{ CONFIG_SYS_FLASH_BASE }
#endif /* CONFIG_SYS_R7780MP_OLD_FLASH */

#define CONFIG_SYS_LOAD_ADDR	(CONFIG_SYS_SDRAM_BASE + 4 * 1024 * 1024)
/* Address of u-boot image in Flash */
#define CONFIG_SYS_MONITOR_BASE	(CONFIG_SYS_FLASH_BASE)
#define CONFIG_SYS_MONITOR_LEN	(256 * 1024)
/* Size of DRAM reserved for malloc() use */
/* NOTE(review): "1204" looks like a transposition of 1024 -- confirm
 * against board history before changing, since it only affects the
 * malloc arena size. */
#define CONFIG_SYS_MALLOC_LEN	(1204 * 1024)
#define CONFIG_SYS_BOOTMAPSZ	(8 * 1024 * 1024)
#define CONFIG_SYS_RX_ETH_BUFFER	(8)

#define CONFIG_SYS_FLASH_CFI
#define CONFIG_FLASH_CFI_DRIVER
#undef  CONFIG_SYS_FLASH_CFI_BROKEN_TABLE
#undef  CONFIG_SYS_FLASH_QUIET_TEST
/* print 'E' for empty sector on flinfo */
#define CONFIG_SYS_FLASH_EMPTY_INFO

/* Environment lives in the flash sector right after the monitor. */
#define CONFIG_ENV_IS_IN_FLASH
#define CONFIG_ENV_SECT_SIZE	(256 * 1024)
#define CONFIG_ENV_SIZE		(CONFIG_ENV_SECT_SIZE)
#define CONFIG_ENV_ADDR		(CONFIG_SYS_MONITOR_BASE + CONFIG_SYS_MONITOR_LEN)

/* Flash timeouts (ms) */
#define CONFIG_SYS_FLASH_ERASE_TOUT	120000
#define CONFIG_SYS_FLASH_WRITE_TOUT	500

/* Board Clock */
#define CONFIG_SYS_CLK_FREQ	33333333
#define CONFIG_SH_TMU_CLK_FREQ	CONFIG_SYS_CLK_FREQ
#define CONFIG_SH_SCIF_CLK_FREQ	CONFIG_SYS_CLK_FREQ
#define CONFIG_SYS_TMU_CLK_DIV	4

/* PCI Controller */
#if defined(CONFIG_CMD_PCI)
#define CONFIG_PCI
#define CONFIG_SH4_PCI
#define CONFIG_SH7780_PCI
#define CONFIG_SH7780_PCI_LSR	0x07f00001
#define CONFIG_SH7780_PCI_LAR	CONFIG_SYS_SDRAM_SIZE
#define CONFIG_SH7780_PCI_BAR	CONFIG_SYS_SDRAM_SIZE
#define CONFIG_PCI_PNP
#define CONFIG_PCI_SCAN_SHOW	1
#define __io
#define __mem_pci

#define CONFIG_PCI_MEM_BUS	0xFD000000	/* Memory space base addr */
#define CONFIG_PCI_MEM_PHYS	CONFIG_PCI_MEM_BUS
#define CONFIG_PCI_MEM_SIZE	0x01000000	/* Size of Memory window */
#define CONFIG_PCI_IO_BUS	0xFE200000	/* IO space base address */
#define CONFIG_PCI_IO_PHYS	CONFIG_PCI_IO_BUS
#define CONFIG_PCI_IO_SIZE	0x00200000	/* Size of IO window */
#define CONFIG_PCI_SYS_PHYS	CONFIG_SYS_SDRAM_BASE
#define CONFIG_PCI_SYS_BUS	CONFIG_SYS_SDRAM_BASE
#define CONFIG_PCI_SYS_SIZE	CONFIG_SYS_SDRAM_SIZE
#endif /* CONFIG_CMD_PCI */

#if defined(CONFIG_CMD_NET)
/* #define CONFIG_RTL8169 */
/* AX88796L Support(NE2000 base chip) */
#define CONFIG_DRIVER_AX88796L
#define CONFIG_DRIVER_NE2000_BASE	0xA4100000
#endif

/* Compact flash Support */
#if defined(CONFIG_CMD_IDE)
#define CONFIG_IDE_RESET	1
#define CONFIG_SYS_PIO_MODE	1
#define CONFIG_SYS_IDE_MAXBUS	1	/* IDE bus */
#define CONFIG_SYS_IDE_MAXDEVICE	1
#define CONFIG_SYS_ATA_BASE_ADDR	0xb4000000
#define CONFIG_SYS_ATA_STRIDE	2	/* 1bit shift */
#define CONFIG_SYS_ATA_DATA_OFFSET	0x1000	/* data reg offset */
#define CONFIG_SYS_ATA_REG_OFFSET	0x1000	/* reg offset */
#define CONFIG_SYS_ATA_ALT_OFFSET	0x800	/* alternate register offset */
#define CONFIG_IDE_SWAP_IO
#endif /* CONFIG_CMD_IDE */

#endif /* __R7780RP_H */
/* -----------------------------------------------------------------------------
 * DohLen() - Defaults to strlen() if not a DOH object
 * ----------------------------------------------------------------------------- */

int DohLen(const DOH *obj) {
  DohBase *b = (DohBase *) obj;

  if (!b)
    return 0;

  /* Non-DOH pointers are treated as plain C strings. */
  if (!DohCheck(b))
    return (int) strlen((char *) obj);

  /* DOH object: dispatch to its type's length method, if any. */
  DohObjInfo *objinfo = b->type;
  return objinfo->doh_len ? (objinfo->doh_len) (b) : 0;
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 *
 * Returns 0 on success (including the no-op case where no endpoints were
 * added or dropped), a negative errno on failure.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	/* Validate arguments; <= 0 means bad args or device not addressed. */
	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	/* Refuse to issue commands to a dying host controller. */
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}
	/* The slot context must always be evaluated; endpoint 0 must never be
	 * added or dropped via this path, so mask it out of both flag sets. */
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Nothing to add and nothing to drop: skip the HC command entirely. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Issue the Configure Endpoint command and wait for it to complete. */
	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Callers must handle the error from xhci_configure_endpoint() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free rings for endpoints that were dropped but not re-added
	 * (flag bit for endpoint index i lives at bit i + 1). */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/* Install newly prepared rings; a changed endpoint's old ring is
	 * obsolete after the command succeeded, so free or cache it first. */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
/*
 * rcm_mod_info() - Return a string describing this module.
 *
 * The returned pointer refers to static storage and must not be freed
 * by the caller.
 */
const char *
rcm_mod_info(void)
{
	static const char *const info = "Bridge module version 1.0";

	/* Trace the query before answering it. */
	rcm_log_message(RCM_TRACE1, "Bridge: mod_info\n");

	return (info);
}
/* Cover routine to allow wrapping target_enable_exception_callback
   inside a catch_errors.

   Maps the target's reply onto catch_errors' convention:
   -1 for the (struct symtab_and_line *) -1 sentinel, 0 for a NULL
   reply, and 1 for any real symtab-and-line.  */
static int
cover_target_enable_exception_callback (void *arg)
{
  args_for_catchpoint_enable *args = arg;
  struct symtab_and_line *sal
    = target_enable_exception_callback (args->kind, args->enable_p);

  if (sal == (struct symtab_and_line *) -1)
    return -1;

  return (sal != NULL) ? 1 : 0;
}
/* irq-mb93093.c: MB93093 FPGA interrupt handling
 *
 * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irc-regs.h>

/* 16-bit FPGA registers are memory-mapped in chip-select region 2. */
#define __reg16(ADDR) (*(volatile unsigned short *)(__region_CS2 + (ADDR)))

/* IMR at offset 0x0a: a set bit masks (disables) the corresponding input. */
#define __get_IMR()	({ __reg16(0x0a); })
#define __set_IMR(M)	do { __reg16(0x0a) = (M); wmb(); } while(0)
/* IFR at offset 0x02: writing the COMPLEMENT of M clears the flags in M. */
#define __get_IFR()	({ __reg16(0x02); })
#define __clr_IFR(M)	do { __reg16(0x02) = ~(M); wmb(); } while(0)

/*
 * off-CPU FPGA PIC operations
 */

/* Mask one FPGA interrupt by setting its IMR bit. */
static void frv_fpga_mask(struct irq_data *d)
{
	uint16_t imr = __get_IMR();

	imr |= 1 << (d->irq - IRQ_BASE_FPGA);

	__set_IMR(imr);
}

/* Acknowledge one FPGA interrupt by clearing its IFR flag. */
static void frv_fpga_ack(struct irq_data *d)
{
	__clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}

/* Mask and acknowledge one FPGA interrupt in a single operation. */
static void frv_fpga_mask_ack(struct irq_data *d)
{
	uint16_t imr = __get_IMR();

	imr |= 1 << (d->irq - IRQ_BASE_FPGA);
	__set_IMR(imr);

	__clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}

/* Unmask one FPGA interrupt by clearing its IMR bit. */
static void frv_fpga_unmask(struct irq_data *d)
{
	uint16_t imr = __get_IMR();

	imr &= ~(1 << (d->irq - IRQ_BASE_FPGA));

	__set_IMR(imr);
}

/* irq_chip callbacks backing the FPGA's interrupt lines. */
static struct irq_chip frv_fpga_pic = {
	.name		= "mb93093",
	.irq_ack	= frv_fpga_ack,
	.irq_mask	= frv_fpga_mask,
	.irq_mask_ack	= frv_fpga_mask_ack,
	.irq_unmask	= frv_fpga_unmask,
};

/*
 * FPGA PIC interrupt handler
 *
 * _mask (passed via dev_id) selects which FPGA inputs this CPU IRQ
 * services; each pending, unmasked, selected input is dispatched to
 * its virtual IRQ.
 */
static irqreturn_t fpga_interrupt(int irq, void *_mask)
{
	uint16_t imr, mask = (unsigned long) _mask;

	imr = __get_IMR();
	/* pending = selected inputs that are unmasked AND flagged */
	mask = mask & ~imr & __get_IFR();

	/* poll all the triggered IRQs */
	while (mask) {
		int irq;

		/* 'scan' locates the highest set bit; convert to bit index. */
		asm("scan %1,gr0,%0" : "=r"(irq) : "r"(mask));
		irq = 31 - irq;
		mask &= ~(1 << irq);

		generic_handle_irq(IRQ_BASE_FPGA + irq);
	}

	return IRQ_HANDLED;
}

/*
 * define an interrupt action for each FPGA PIC output
 * - use dev_id to indicate the FPGA PIC input to output mappings
 */
static struct irqaction fpga_irq[1]  = {
	[0] = {
		.handler	= fpga_interrupt,
		.flags		= IRQF_DISABLED,
		.name		= "fpga.0",
		.dev_id		= (void *) 0x0700UL,
	}
};

/*
 * initialise the motherboard FPGA's PIC
 */
void __init fpga_init(void)
{
	int irq;

	/* all PIC inputs are all set to be edge triggered */
	__set_IMR(0x0700);
	/* note: __clr_IFR writes ~M, so passing 0x0000 clears every flag */
	__clr_IFR(0x0000);

	/* register chip/handler for FPGA inputs 8..10 only */
	for (irq = IRQ_BASE_FPGA + 8; irq <= IRQ_BASE_FPGA + 10; irq++)
		irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_edge_irq);

	/* the FPGA drives external IRQ input #2 on the CPU PIC */
	setup_irq(IRQ_CPU_EXTERNAL2, &fpga_irq[0]);
}
/* Calls the ndo_add_vxlan_port of the caller in order to
 * supply the listening VXLAN udp ports. Callers are expected
 * to implement the ndo_add_vxlan_port.
 */
void vxlan_get_rx_port(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	sa_family_t sa_family;
	__be16 port;
	unsigned int i;

	/* Walk every bucket of the per-namespace socket hash table and
	 * report each open VXLAN socket's local port to the driver. */
	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			/* local port of the listening UDP socket,
			 * already in network byte order */
			port = inet_sk(vs->sock->sk)->inet_sport;
			sa_family = vxlan_get_sk_family(vs);
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
		}
	}
	spin_unlock(&vn->sock_lock);
}
/**************************************************************************** **************************************************************************** *** *** This header was automatically generated from a Linux kernel header *** of the same name, to make information necessary for userspace to *** call into the kernel available to libc. It contains only constants, *** structures, and macros generated from the original header, and thus, *** contains no copyrightable information. *** *** To edit the content of this header, modify the corresponding *** source file (e.g. under external/kernel-headers/original/) then *** run bionic/libc/kernel/tools/update_all.py *** *** Any manual change here will be lost the next time this script will *** be run. You've been warned! *** **************************************************************************** ****************************************************************************/ #ifndef _IPCONNTRACK_NETLINK_H #define _IPCONNTRACK_NETLINK_H #include <linux/netfilter/nfnetlink.h> enum cntl_msg_types { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ IPCTNL_MSG_CT_NEW, IPCTNL_MSG_CT_GET, IPCTNL_MSG_CT_DELETE, IPCTNL_MSG_CT_GET_CTRZERO, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ IPCTNL_MSG_CT_GET_STATS_CPU, IPCTNL_MSG_CT_GET_STATS, IPCTNL_MSG_CT_GET_DYING, IPCTNL_MSG_CT_GET_UNCONFIRMED, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ IPCTNL_MSG_MAX }; enum ctnl_exp_msg_types { IPCTNL_MSG_EXP_NEW, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ IPCTNL_MSG_EXP_GET, IPCTNL_MSG_EXP_DELETE, IPCTNL_MSG_EXP_GET_STATS_CPU, IPCTNL_MSG_EXP_MAX /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; enum ctattr_type { CTA_UNSPEC, CTA_TUPLE_ORIG, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_TUPLE_REPLY, CTA_STATUS, CTA_PROTOINFO, CTA_HELP, /* WARNING: DO NOT EDIT, 
AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_NAT_SRC, #define CTA_NAT CTA_NAT_SRC CTA_TIMEOUT, CTA_MARK, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_COUNTERS_ORIG, CTA_COUNTERS_REPLY, CTA_USE, CTA_ID, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_NAT_DST, CTA_TUPLE_MASTER, CTA_SEQ_ADJ_ORIG, CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_SEQ_ADJ_REPLY, CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY, CTA_SECMARK, CTA_ZONE, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_SECCTX, CTA_TIMESTAMP, CTA_MARK_MASK, CTA_LABELS, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_LABELS_MASK, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ enum ctattr_tuple { CTA_TUPLE_UNSPEC, CTA_TUPLE_IP, CTA_TUPLE_PROTO, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_TUPLE_MAX }; #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1) enum ctattr_ip { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_IP_UNSPEC, CTA_IP_V4_SRC, CTA_IP_V4_DST, CTA_IP_V6_SRC, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_IP_V6_DST, __CTA_IP_MAX }; #define CTA_IP_MAX (__CTA_IP_MAX - 1) /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ enum ctattr_l4proto { CTA_PROTO_UNSPEC, CTA_PROTO_NUM, CTA_PROTO_SRC_PORT, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_PROTO_DST_PORT, CTA_PROTO_ICMP_ID, CTA_PROTO_ICMP_TYPE, CTA_PROTO_ICMP_CODE, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_PROTO_ICMPV6_ID, CTA_PROTO_ICMPV6_TYPE, CTA_PROTO_ICMPV6_CODE, __CTA_PROTO_MAX /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; #define CTA_PROTO_MAX (__CTA_PROTO_MAX - 1) 
enum ctattr_protoinfo { CTA_PROTOINFO_UNSPEC, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_PROTOINFO_TCP, CTA_PROTOINFO_DCCP, CTA_PROTOINFO_SCTP, __CTA_PROTOINFO_MAX /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) enum ctattr_protoinfo_tcp { CTA_PROTOINFO_TCP_UNSPEC, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_PROTOINFO_TCP_STATE, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, CTA_PROTOINFO_TCP_WSCALE_REPLY, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_PROTOINFO_TCP_FLAGS_REPLY, __CTA_PROTOINFO_TCP_MAX }; #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ enum ctattr_protoinfo_dccp { CTA_PROTOINFO_DCCP_UNSPEC, CTA_PROTOINFO_DCCP_STATE, CTA_PROTOINFO_DCCP_ROLE, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, __CTA_PROTOINFO_DCCP_MAX, }; #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ enum ctattr_protoinfo_sctp { CTA_PROTOINFO_SCTP_UNSPEC, CTA_PROTOINFO_SCTP_STATE, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_PROTOINFO_SCTP_VTAG_REPLY, __CTA_PROTOINFO_SCTP_MAX }; #define CTA_PROTOINFO_SCTP_MAX (__CTA_PROTOINFO_SCTP_MAX - 1) /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ enum ctattr_counters { CTA_COUNTERS_UNSPEC, CTA_COUNTERS_PACKETS, CTA_COUNTERS_BYTES, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_COUNTERS32_PACKETS, CTA_COUNTERS32_BYTES, __CTA_COUNTERS_MAX }; /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) enum ctattr_tstamp { 
CTA_TIMESTAMP_UNSPEC, CTA_TIMESTAMP_START, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_TIMESTAMP_STOP, __CTA_TIMESTAMP_MAX }; #define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1) /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ enum ctattr_nat { CTA_NAT_UNSPEC, CTA_NAT_V4_MINIP, #define CTA_NAT_MINIP CTA_NAT_V4_MINIP /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_NAT_V4_MAXIP, #define CTA_NAT_MAXIP CTA_NAT_V4_MAXIP CTA_NAT_PROTO, CTA_NAT_V6_MINIP, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_NAT_V6_MAXIP, __CTA_NAT_MAX }; #define CTA_NAT_MAX (__CTA_NAT_MAX - 1) /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ enum ctattr_protonat { CTA_PROTONAT_UNSPEC, CTA_PROTONAT_PORT_MIN, CTA_PROTONAT_PORT_MAX, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_PROTONAT_MAX }; #define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1) enum ctattr_seqadj { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_SEQADJ_UNSPEC, CTA_SEQADJ_CORRECTION_POS, CTA_SEQADJ_OFFSET_BEFORE, CTA_SEQADJ_OFFSET_AFTER, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_SEQADJ_MAX }; #define CTA_SEQADJ_MAX (__CTA_SEQADJ_MAX - 1) enum ctattr_natseq { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_NAT_SEQ_UNSPEC, CTA_NAT_SEQ_CORRECTION_POS, CTA_NAT_SEQ_OFFSET_BEFORE, CTA_NAT_SEQ_OFFSET_AFTER, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_NAT_SEQ_MAX }; #define CTA_NAT_SEQ_MAX (__CTA_NAT_SEQ_MAX - 1) enum ctattr_expect { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_EXPECT_UNSPEC, CTA_EXPECT_MASTER, CTA_EXPECT_TUPLE, CTA_EXPECT_MASK, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_EXPECT_TIMEOUT, CTA_EXPECT_ID, CTA_EXPECT_HELP_NAME, CTA_EXPECT_ZONE, 
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_EXPECT_FLAGS, CTA_EXPECT_CLASS, CTA_EXPECT_NAT, CTA_EXPECT_FN, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_EXPECT_MAX }; #define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1) enum ctattr_expect_nat { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_EXPECT_NAT_UNSPEC, CTA_EXPECT_NAT_DIR, CTA_EXPECT_NAT_TUPLE, __CTA_EXPECT_NAT_MAX /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ }; #define CTA_EXPECT_NAT_MAX (__CTA_EXPECT_NAT_MAX - 1) enum ctattr_help { CTA_HELP_UNSPEC, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_HELP_NAME, CTA_HELP_INFO, __CTA_HELP_MAX }; /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #define CTA_HELP_MAX (__CTA_HELP_MAX - 1) enum ctattr_secctx { CTA_SECCTX_UNSPEC, CTA_SECCTX_NAME, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_SECCTX_MAX }; #define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1) enum ctattr_stats_cpu { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_STATS_UNSPEC, CTA_STATS_SEARCHED, CTA_STATS_FOUND, CTA_STATS_NEW, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_STATS_INVALID, CTA_STATS_IGNORE, CTA_STATS_DELETE, CTA_STATS_DELETE_LIST, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_STATS_INSERT, CTA_STATS_INSERT_FAILED, CTA_STATS_DROP, CTA_STATS_EARLY_DROP, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_STATS_ERROR, CTA_STATS_SEARCH_RESTART, __CTA_STATS_MAX, }; /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ #define CTA_STATS_MAX (__CTA_STATS_MAX - 1) enum ctattr_stats_global { CTA_STATS_GLOBAL_UNSPEC, CTA_STATS_GLOBAL_ENTRIES, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_STATS_GLOBAL_MAX, }; #define 
CTA_STATS_GLOBAL_MAX (__CTA_STATS_GLOBAL_MAX - 1) enum ctattr_expect_stats { /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ CTA_STATS_EXP_UNSPEC, CTA_STATS_EXP_NEW, CTA_STATS_EXP_CREATE, CTA_STATS_EXP_DELETE, /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */ __CTA_STATS_EXP_MAX, }; #define CTA_STATS_EXP_MAX (__CTA_STATS_EXP_MAX - 1) #endif /* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
/*
 * Retrieve a value for one of the statistics for a particular tx ring.
 *
 * rdriver - opaque ring handle supplied by the MAC layer
 * stat    - which MAC_STAT_* counter is being requested
 * val     - out: the counter value
 *
 * Returns 0 on success (val filled in, or stats not yet available),
 * ENOTSUP for a statistic this ring does not maintain.
 */
int
nxge_tx_ring_stat(mac_ring_driver_t rdriver, uint_t stat, uint64_t *val)
{
	p_nxge_ring_handle_t rhp = (p_nxge_ring_handle_t)rdriver;
	p_nxge_t nxgep = rhp->nxgep;
	int r_index;
	p_nxge_stats_t statsp;

	ASSERT(nxgep != NULL);
	statsp = (p_nxge_stats_t)nxgep->statsp;
	ASSERT(statsp != NULL);
	/* Translate the ring's local index into the hardware TDC index. */
	r_index = nxgep->pt_config.hw_config.tdc.start + rhp->index;

	/* Counters are not valid until the kstat for this channel exists;
	 * report success with whatever *val the caller had. */
	if (statsp->tdc_ksp[r_index] == NULL)
		return (0);

	switch (stat) {
	case MAC_STAT_OERRORS:
		*val = statsp->tdc_stats[r_index].oerrors;
		break;
	case MAC_STAT_OBYTES:
		*val = statsp->tdc_stats[r_index].obytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = statsp->tdc_stats[r_index].opackets;
		break;
	default:
		*val = 0;
		return (ENOTSUP);
	}

	return (0);
}
/*-
 *-----------------------------------------------------------------------
 * SuffFindArchiveDeps --
 *	Locate dependencies for an OP_ARCHV node: a target of the form
 *	lib(member).  The member is resolved as its own node, its
 *	dependencies are found, and the archive node inherits the
 *	member's TARGET/PREFIX variables plus an applicable
 *	member-suffix -> archive-suffix transformation, if any.
 *
 * Side Effects:
 *	Same as Suff_FindDeps
 *-----------------------------------------------------------------------
 */
static void
SuffFindArchiveDeps(
    GNode *gn,
    Lst slst)
{
	char *eoarch;	/* End of archive portion */
	char *eoname;	/* End of member portion */
	GNode *mem;	/* Node for member */
	Suff *ms;	/* Suffix descriptor for member */
	char *name;	/* Start of member's name */

	/* The target name must look like "archive(member" ... ")";
	 * bail out quietly if either delimiter is missing. */
	eoarch = strchr(gn->name, '(');
	if (eoarch == NULL)
		return;
	name = eoarch + 1;
	eoname = strchr(name, ')');
	if (eoname == NULL)
		return;

	/* Create (or look up) the node for the member and compute its
	 * dependencies before wiring it in as our child. */
	mem = Targ_FindNodei(name, eoname, TARG_CREATE);

	SuffFindDeps(mem, slst);
	if (Lst_AddNew(&gn->children, mem))
		LinkParent(mem, gn);

	/* The archive node borrows the member's TARGET and PREFIX. */
	Var(TARGET_INDEX, gn) = Var(TARGET_INDEX, mem);
	Var(PREFIX_INDEX, gn) = Var(PREFIX_INDEX, mem);

	ms = mem->suffix;
	if (ms == NULL) {
		/* Member has no known suffix: pretend it has the empty one
		 * so a transformation can still be looked up. */
		if (DEBUG(SUFF))
			printf("using empty suffix\n");
		ms = emptySuff;
	}

	/* Set MEMBER and ARCHIVE variables for the archive node. */
	Var(MEMBER_INDEX, gn) = mem->name;
	Var(ARCHIVE_INDEX, gn) = gn->name;

	if (ms != NULL) {
		/* Member has a known suffix: look for a transformation from
		 * the member's suffix to a suffix matching the archive name
		 * (up to the '('), and apply it if found. */
		Suff *suff;

		suff = find_suffix_as_suffix(&ms->parents, gn->name, eoarch);

		if (suff != NULL) {
			if (!SuffApplyTransform(gn, mem, suff, ms) &&
			    DEBUG(SUFF))
				printf("\tNo transformation from %s -> %s\n",
				    ms->name, suff->name);
		}
	}

	/* An archive node with no commands of its own at least depends on
	 * its member being up to date. */
	if (OP_NOP(gn->type))
		gn->type |= OP_DEPENDS;

	mem->type |= OP_MEMBER;
}
/* Given an EXPR_COMPCALL calling a GENERIC typebound procedure, figure out
   which of the specific bindings (if any) matches the arglist and transform
   the expression into a call of that binding.

   On success the compcall's tbp is replaced by the matching specific
   binding (re-looked-up in the declared type so overriding works) and
   *name, if provided, receives the specific binding's name.  Returns
   false with an error issued if no specific binding matches.  */

static bool
resolve_typebound_generic_call (gfc_expr* e, const char **name)
{
  gfc_typebound_proc* genproc;
  const char* genname;
  gfc_symtree *st;
  gfc_symbol *derived;

  gcc_assert (e->expr_type == EXPR_COMPCALL);
  genname = e->value.compcall.name;
  genproc = e->value.compcall.tbp;

  /* Nothing to do for a call that is already specific.  */
  if (!genproc->is_generic)
    return true;

  /* Walk the overriding chain so inherited specifics are considered.  */
  for (; genproc; genproc = genproc->overridden)
    {
      gfc_tbp_generic* g;

      gcc_assert (genproc->is_generic);
      for (g = genproc->u.generic; g; g = g->next)
	{
	  gfc_symbol* target;
	  gfc_actual_arglist* args;
	  bool matches;

	  gcc_assert (g->specific);

	  if (g->specific->error)
	    continue;

	  target = g->specific->u.specific->n.sym;

	  /* Check if this arglist matches the formal arguments of the
	     specific target; we work on a copy because the passed-object
	     insertion below must not disturb the original call.  */
	  args = gfc_copy_actual_arglist (e->value.compcall.actual);
	  if (!g->specific->nopass)
	    {
	      gfc_expr* po;
	      po = extract_compcall_passed_object (e);
	      if (!po)
		{
		  gfc_free_actual_arglist (args);
		  return false;
		}

	      gcc_assert (g->specific->pass_arg_num > 0);
	      gcc_assert (!g->specific->error);
	      args = update_arglist_pass (args, po, g->specific->pass_arg_num,
					  g->specific->pass_arg);
	    }
	  resolve_actual_arglist (args, target->attr.proc,
				  is_external_proc (target)
				  && gfc_sym_get_dummy_args (target) == NULL);

	  matches = gfc_arglist_matches_symbol (&args, target);

	  /* Argument list provides no data after matching, free it.  */
	  gfc_free_actual_arglist (args);

	  if (matches)
	    {
	      e->value.compcall.tbp = g->specific;
	      genname = g->specific_st->name;
	      /* Pass along the name for CLASS methods, where the vtab
		 procedure pointer component has to be referenced.  */
	      if (name)
		*name = genname;
	      goto success;
	    }
	}
    }

  /* Nothing matched, report an error.  */
  gfc_error ("Found no matching specific binding for the call to the GENERIC"
	     " %qs at %L", genname, &e->where);
  return false;

success:
  /* Make sure that we have the right specific instance for the name.  */
  derived = get_declared_from_expr (NULL, NULL, e, true);

  st = gfc_find_typebound_proc (derived, NULL, genname, true, &e->where);
  if (st)
    e->value.compcall.tbp = st->n.tb;

  return true;
}
/*
 * \brief Function to configure UART module pin mux
 *
 * Performs a read-modify-write of the SYSCFG PINMUX4 register: the four
 * 4-bit pin fields (bits 31:28, 27:24, 23:20, 19:16) are first cleared
 * and then set to the UART function encoded in BOARD_PINMUX4_UART_ENABLE.
 *
 * \return None
 */
static void Board_configUartPinMux(void)
{
    /* Clear the four pin-mux fields that route the UART signals... */
    hSysCfg->PINMUX4 &= (~(CSL_SYSCFG_PINMUX4_PINMUX4_31_28_MASK | \
                           CSL_SYSCFG_PINMUX4_PINMUX4_27_24_MASK | \
                           CSL_SYSCFG_PINMUX4_PINMUX4_23_20_MASK | \
                           CSL_SYSCFG_PINMUX4_PINMUX4_19_16_MASK));

    /* ...then select the UART function on those pins. */
    hSysCfg->PINMUX4 |= BOARD_PINMUX4_UART_ENABLE;
}
/**
 * @internal walk the given UTF8 string, looking for non-ASCII characters.
 * @return 0 if none were found, or, if non-ASCII strings were found,
 * answer the length of the buffer if it were converted to platform
 * encoding.  A return of 0 is also used when the buffer contains a
 * malformed UTF-8 sequence (decodeUTF8CharN consumed nothing).
 *
 * @note this relies on the assumption that wide chars are Unicode.
 * If not, the platform will need different support for this
 */
static IDATA
walkUTF8String (const U_8 * buf, IDATA nbytes)
{
  const U_8 *end = buf + nbytes;
  const U_8 *cursor = buf;
  IDATA newLength = 0;
  int hasHighChars = 0;
  int wcresult;

  /* wctomb(NULL, 0) resets wctomb's internal conversion state before we
   * start converting (see C99 7.20.7.3); the return value is unused. */
  wcresult = wctomb (NULL, 0);

  while (cursor < end)
    {
      if ((*cursor & 0x80) == 0x80)
	{
	  /* Multibyte (non-ASCII) lead byte.  NOTE(review): MB_CUR_MAX is
	   * a runtime expression on common libcs, making temp a VLA. */
	  char temp[MB_CUR_MAX];
	  U_16 unicode;
	  U_32 numberU8Consumed =
	    decodeUTF8CharN (cursor, &unicode, end - cursor);
	  if (numberU8Consumed == 0)
	    {
	      /* invalid UTF-8: report "no conversion needed" */
	      return 0;
	    }
	  cursor += numberU8Consumed;
	  /* Measure how many bytes this code point takes in the platform
	   * encoding; an unconvertible character is counted as one byte. */
	  wcresult = wctomb (temp, (wchar_t) unicode);
	  if (wcresult == -1)
	    {
	      newLength += 1;
	    }
	  else
	    {
	      newLength += wcresult;
	    }
	  hasHighChars = 1;
	}
      else
	{
	  /* plain ASCII byte: one byte in any platform encoding */
	  newLength += 1;
	  cursor += 1;
	}
    }

  return hasHighChars ? newLength : 0;
}
#include<stdio.h>

/*
 * For each of q queries (n, m), prints the sum of the last decimal digits
 * of every positive multiple of m that is <= n, i.e. of m, 2m, ...,
 * (n/m)*m.  Assumes m >= 1.
 *
 * Trick: the last digit of k*m depends only on (k*m) mod 10, so the digit
 * sequence is periodic with period at most 10.  arr[i] holds the prefix
 * sum of the first i digits of one period (arr[0] = 0; arr[1] = m%10 is
 * both the first digit and the first prefix sum).  The period is detected
 * when (i*m)%10 repeats the first digit; the i-- leaves i equal to the
 * period length (the break always fires by i = 11, since 11*m = m mod 10,
 * so arr is never indexed past 10).  The answer is then
 * (full periods) * (period sum) + (prefix of the remainder).
 */
int main(void)
{
    long long int q,cnt,n,m,x,i,arr[12];
    scanf("%lld",&q);
    while(q--)
    {
        scanf("%lld%lld",&n,&m);
        cnt = n/m;                /* how many multiples of m are <= n */
        arr[1] = m%10;            /* first digit == first prefix sum  */
        arr[0] = 0;
        for(i=2;i<=11;i++)
        {
            x=(i*m)%10;           /* last digit of the i-th multiple  */
            if(x==arr[1])
            {
                i--;              /* i is now the period length       */
                break;
            }
            else
            {
                arr[i]=x+arr[i-1];  /* extend the prefix sums         */
            }
        }
        /* full periods contribute arr[i] each; the tail is a prefix */
        printf("%lld\n",(cnt/i)*arr[i]+arr[cnt%i]);
    }
    return 0;
}
// in the case we have a long list of items, e.g. 5000 and the 3 values added up
// to the sum are located between 4500 to 5000, then searching from the
// beginning will be in efficient.
//
// in this case, we can make use of binary search approach that we do the
// following iterations:
// - 1st = midpoint is at 2500, we try to find two values from 0 - 2499 and
// another two from 2501 - 5000
// to add up to sum.
// - 2nd = midpoint is at 3751, we try to find two values from 2501 - 3750 and
// aniother two from 3752 - 5000
// - 3rd = midpoint is at 4377, and we further divide it into two panes.
//
// if we are diving into right pane, we will shorten the array in subcall as we
// do not awant the subcall left pane to extend toward all the way to 0.
//
// Returns 1 if any three elements (distinct indices) of array[0..size-1]
// sum to `sum`, else 0.  `mid` is the pivot index to anchor the search.
// (The function name keeps its historical spelling: callers reference it.)
int deteremineIf3NumberSumToValueInDivAndConquerRecursive(int array[], int size,
                                                          int sum, int mid) {
  int i;
  int j;

  if (mid >= size)
    return 0;

  // Pair the pivot with two values taken entirely from its left side.
  if (mid >= 2) {
    for (i = mid - 1; i > 0; i--) {
      for (j = i - 1; j >= 0; j--) {
        if ((array[mid] + array[i] + array[j]) == sum)
          goto found;
      }
    }
  }

  // Pair the pivot with two values taken entirely from its right side.
  for (i = mid + 1; i < size - 1; i++) {
    for (j = i + 1; j < size; j++) {
      if ((array[mid] + array[i] + array[j]) == sum)
        goto found;
    }
  }

  // Recurse into the right pane.  For a large pane, pivot at its middle;
  // for a small pane, try each start position exhaustively.
  if ((size - mid) > 5) {
    if (deteremineIf3NumberSumToValueInDivAndConquerRecursive(
            &array[mid + 1], size - mid - 1, sum, (size - mid - 1) / 2))
      goto found;
  } else {
    for (i = mid + 1; i < size - 2; i++) {
      // BUGFIX: the sub-array starting at index i holds size - i elements,
      // not size - mid - 1; the old length read past the end of `array`
      // on every iteration after the first (out-of-bounds access).
      if (deteremineIf3NumberSumToValueInDivAndConquerRecursive(
              &array[i], size - i, sum, 0))
        goto found;
    }
  }

  // Recurse into the left pane by halving the pivot index.
  if (mid > 0 && deteremineIf3NumberSumToValueInDivAndConquerRecursive(
                     array, size, sum, mid / 2))
    goto found;

  return 0;

found:
  return 1;
}
/**
 * \brief Enables the ADC module.
 *
 * Enables an ADC module that has previously been configured. If any internal reference
 * is selected it will be enabled.
 *
 * \param[in] module_inst  Pointer to the ADC software instance struct
 *
 * \return STATUS_OK always (failures are caught by the Assert()s).
 */
static inline enum status_code adc_enable(
		struct adc_module *const module_inst)
{
	Assert(module_inst);
	Assert(module_inst->hw);

	Adc *const adc_module = module_inst->hw;

	while (adc_is_syncing(module_inst)) {
		/* Wait for synchronization before touching registers */
	}

#if ADC_CALLBACK_MODE == true
#  if (ADC_INST_NUM > 1)
	/* Multiple ADC instances: resolve this instance's vector */
	system_interrupt_enable(_adc_interrupt_get_interrupt_vector(
			_adc_get_inst_index(adc_module)));
#  elif (SAMC20)
	system_interrupt_enable(SYSTEM_INTERRUPT_MODULE_ADC0);
#  else
	system_interrupt_enable(SYSTEM_INTERRUPT_MODULE_ADC);
#  endif
#endif

	/* Disable all interrupt sources and clear any stale flags so the
	 * module starts from a clean interrupt state. */
	adc_module->INTENCLR.reg = ADC_INTENCLR_MASK;
	adc_module->INTFLAG.reg = ADC_INTFLAG_MASK;

	adc_module->CTRLA.reg |= ADC_CTRLA_ENABLE;

	while (adc_is_syncing(module_inst)) {
		/* Wait for the enable to synchronize */
	}

	return STATUS_OK;
}
/*
 * Routine:	kdp_lck_spin_is_acquired
 * NOT SAFE: To be used only by kernel debugger to avoid deadlock.
 * Returns: TRUE if lock is acquired.
 */
boolean_t
kdp_lck_spin_is_acquired(lck_spin_t *lck)
{
	/* Refuse to inspect lock state outside the debugger context. */
	if (not_in_kdp) {
		panic("panic: spinlock acquired check done outside of kernel debugger");
	}

	/* A non-zero interlock word means someone holds the lock. */
	if (lck->interlock != 0) {
		return TRUE;
	}
	return FALSE;
}
// Auto-generated file. Do not edit! // Template: src/x32-transposec/neon-zip.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <arm_neon.h> #include <assert.h> #include <xnnpack/common.h> #include <xnnpack/math.h> #include <xnnpack/transpose.h> void xnn_x32_transposec_ukernel__4x4_multi_multi_zip_neon( const uint32_t* input, uint32_t* output, size_t input_stride, size_t output_stride, size_t block_width, size_t block_height, const union xnn_x32_transpose_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(output_stride >= block_height * sizeof(uint32_t)); assert(input_stride >= block_width * sizeof(uint32_t)); const size_t tile_height = 4; const size_t tile_width = 4; const size_t tile_hbytes = tile_height * sizeof(uint32_t); const size_t tile_wbytes = tile_width * sizeof(uint32_t); const size_t input_reset = tile_wbytes - round_down_po2(block_height, tile_height) * input_stride; const size_t input_offset = tile_height * input_stride; const size_t output_reset = tile_width * output_stride - round_down_po2(block_height, 2) * sizeof(uint32_t); const uint32_t* i0 = input; const uint32_t* i1 = (const uint32_t*) ((uintptr_t) i0 + input_stride); const uint32_t* i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride); const uint32_t* i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride); uint32_t* o0 = (uint32_t*) output; uint32_t* o1 = (uint32_t*) ((uintptr_t) o0 + output_stride); uint32_t* o2 = (uint32_t*) ((uintptr_t) o1 + output_stride); uint32_t* o3 = (uint32_t*) ((uintptr_t) o2 + output_stride); do { if XNN_UNPREDICTABLE(block_width < 2) { o1 = o0; } if XNN_UNPREDICTABLE(block_width <= 2) { o2 = o0; } if XNN_UNPREDICTABLE(block_width < 4) { o3 = o0; } size_t bh = block_height; for (; bh >= 4; bh -= 4) { const uint32x4_t v2_0 = vld1q_u32(i0); i0 = (uint32_t*) ((uintptr_t) i0 + input_offset); 
const uint32x4_t v2_1 = vld1q_u32(i1); i1 = (uint32_t*) ((uintptr_t) i1 + input_offset); const uint32x4_t v2_2 = vld1q_u32(i2); i2 = (uint32_t*) ((uintptr_t) i2 + input_offset); const uint32x4_t v2_3 = vld1q_u32(i3); i3 = (uint32_t*) ((uintptr_t) i3 + input_offset); const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2); const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3); const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]); const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]); vst1q_u32(o3, v0_1.val[1]); o3 = (uint32_t*) ((uintptr_t) o3 + tile_hbytes); vst1q_u32(o2, v0_1.val[0]); o2 = (uint32_t*) ((uintptr_t) o2 + tile_hbytes); vst1q_u32(o1, v0_0.val[1]); o1 = (uint32_t*) ((uintptr_t) o1 + tile_hbytes); vst1q_u32(o0, v0_0.val[0]); o0 = (uint32_t*) ((uintptr_t) o0 + tile_hbytes); } if (bh != 0) { const uint32x4_t v2_0 = vld1q_u32(i0); if XNN_UNPREDICTABLE(bh < 2) { i1 = i0; } const uint32x4_t v2_1 = vld1q_u32(i1); if XNN_UNPREDICTABLE(bh <= 2) { i2 = i0; } const uint32x4_t v2_2 = vld1q_u32(i2); const uint32x4_t v2_3 = vmovq_n_u32(0); const uint32x4x2_t v1_0 = vzipq_u32(v2_0, v2_2); const uint32x4x2_t v1_1 = vzipq_u32(v2_1, v2_3); const uint32x4x2_t v0_0 = vzipq_u32(v1_0.val[0], v1_1.val[0]); const uint32x4x2_t v0_1 = vzipq_u32(v1_0.val[1], v1_1.val[1]); uint32x2_t v0_low = vget_low_u32(v0_0.val[0]); uint32x2_t v1_low = vget_low_u32(v0_0.val[1]); uint32x2_t v2_low = vget_low_u32(v0_1.val[0]); uint32x2_t v3_low = vget_low_u32(v0_1.val[1]); if (bh & 2) { vst1_u32(o3, v3_low); o3 += 2; vst1_u32(o2, v2_low); o2 += 2; vst1_u32(o1, v1_low); o1 += 2; vst1_u32(o0, v0_low); o0 += 2; v0_low = vget_high_u32(v0_0.val[0]); v1_low = vget_high_u32(v0_0.val[1]); v2_low = vget_high_u32(v0_1.val[0]); v3_low = vget_high_u32(v0_1.val[1]); } if (bh & 1) { vst1_lane_u32(o3, v3_low, 0); vst1_lane_u32(o2, v2_low, 0); vst1_lane_u32(o1, v1_low, 0); vst1_lane_u32(o0, v0_low, 0); } } i0 = (const uint32_t*) ((uintptr_t) i0 + input_reset); i1 = (const uint32_t*) ((uintptr_t) i0 + 
input_stride); i2 = (const uint32_t*) ((uintptr_t) i1 + input_stride); i3 = (const uint32_t*) ((uintptr_t) i2 + input_stride); o0 = (uint32_t*) ((uintptr_t) o0 + output_reset); o1 = (uint32_t*) ((uintptr_t) o1 + output_reset); o2 = (uint32_t*) ((uintptr_t) o2 + output_reset); o3 = (uint32_t*) ((uintptr_t) o3 + output_reset); block_width = doz(block_width, tile_width); } while (block_width != 0); }
/**
 * Check if component buffer is sane.
 *
 * Validates the flattened pool-map tree level by level:
 *  - there is at least one target;
 *  - domain types strictly increase from parent level to child level;
 *  - every component's version is <= the map version;
 *  - all domains on one level share the same type and have targets;
 *  - siblings agree on whether they have children/targets, and their
 *    children/target arrays are laid out contiguously in the buffer;
 *  - every leaf is a PO_COMP_TP_TARGET with a valid version.
 *
 * Returns true if every invariant holds, false (with a debug message
 * naming the violated invariant) otherwise.
 */
static bool pool_tree_sane(struct pool_domain *tree, uint32_t version)
{
	struct pool_domain *parent = NULL;
	struct pool_target *targets = tree[0].do_targets;
	struct pool_comp_cntr cntr;
	int dom_nr;
	int i;

	D_DEBUG(DB_TRACE, "Sanity check of component buffer\n");
	pool_tree_count(tree, &cntr);
	if (cntr.cc_targets == 0) {
		D_DEBUG(DB_MGMT, "Buffer has no target\n");
		return false;
	}

	/* Walk one level of the tree per iteration; dom_nr is the number of
	 * domains on the current level, recomputed from the children count. */
	for (dom_nr = cntr.cc_top_doms; tree != NULL;
	     tree = tree[0].do_children) {
		struct pool_domain *prev = &tree[0];
		int child_nr = 0;

		/* Domain type must strictly increase as we descend. */
		if (parent != NULL &&
		    parent->do_comp.co_type >= tree[0].do_comp.co_type) {
			D_DEBUG(DB_MGMT,
				"Type of parent domain %d(%s) should be "
				"smaller than child domain %d(%s)\n",
				parent->do_comp.co_type,
				pool_domain_name(parent),
				tree[0].do_comp.co_type,
				pool_domain_name(&tree[0]));
			return false;
		}

		for (i = 0; i < dom_nr; i++) {
			/* Component version must not exceed the map version. */
			if (tree[i].do_comp.co_ver > version) {
				D_DEBUG(DB_MGMT, "Invalid version %u/%u\n",
					tree[i].do_comp.co_ver, version);
				return false;
			}

			/* All domains on one level share the same type. */
			if (prev->do_comp.co_type != tree[i].do_comp.co_type) {
				D_DEBUG(DB_MGMT,
					"Unmatched domain type %d/%d\n",
					tree[i].do_comp.co_type,
					prev->do_comp.co_type);
				return false;
			}

			if (tree[i].do_targets == NULL ||
			    tree[i].do_target_nr == 0) {
				D_DEBUG(DB_MGMT, "No target found\n");
				return false;
			}

			/* Siblings must agree on having children ... */
			if ((prev->do_children == NULL) ^
			    (tree[i].do_children == NULL)) {
				D_DEBUG(DB_MGMT, "Invalid child tree\n");
				return false;
			}

			/* ... and on having targets. */
			if ((prev->do_targets == NULL) ^
			    (tree[i].do_targets == NULL)) {
				D_DEBUG(DB_MGMT, "Invalid target tree\n");
				return false;
			}

			/* Children arrays of consecutive siblings must be
			 * contiguous in the flattened buffer. */
			if (prev != &tree[i] &&
			    prev->do_children != NULL &&
			    prev->do_children + prev->do_child_nr !=
			    tree[i].do_children) {
				D_DEBUG(DB_MGMT, "Invalid children pointer\n");
				return false;
			}

			/* Same contiguity requirement for target arrays. */
			if (prev != &tree[i] &&
			    prev->do_targets != NULL &&
			    prev->do_targets + prev->do_target_nr !=
			    tree[i].do_targets) {
				D_DEBUG(DB_MGMT, "Invalid children pointer i"
					" %d target nr %d\n", i,
					prev->do_target_nr);
				return false;
			}

			if (tree[i].do_child_nr != 0)
				child_nr += tree[i].do_child_nr;

			prev = &tree[i];
		}
		parent = &tree[0];
		dom_nr = child_nr;
	}

	/* Validate every leaf target recorded in the counter. */
	for (i = 0; i < cntr.cc_targets; i++) {
		if (targets[i].ta_comp.co_type != PO_COMP_TP_TARGET) {
			D_DEBUG(DB_MGMT, "Invalid leaf type %d(%s) i %d\n",
				targets[i].ta_comp.co_type,
				pool_comp_name(&targets[i].ta_comp), i);
			return false;
		}

		if (targets[i].ta_comp.co_ver > version) {
			D_DEBUG(DB_MGMT, "Invalid version %u/%u i %d\n",
				targets[i].ta_comp.co_ver, version, i);
			return false;
		}
	}

	D_DEBUG(DB_TRACE, "Component buffer is sane\n");
	return true;
}
/**
 * @brief Frees a parsed XPath expression. \p exp should not be used afterwards.
 *
 * Releases the per-token repeat arrays, the token storage arrays, the
 * expression string, and finally the structure itself.  A NULL \p exp
 * is a no-op.
 *
 * @param[in] exp Expression to free.
 */
static void
exp_free(struct lyxp_expr *exp)
{
    uint16_t idx;

    if (!exp) {
        return;
    }

    /* each used token may own a repeat array of its own */
    if (exp->repeat) {
        for (idx = 0; idx < exp->used; ++idx) {
            free(exp->repeat[idx]);
        }
    }
    free(exp->repeat);

    /* token bookkeeping arrays */
    free(exp->tok_len);
    free(exp->expr_pos);
    free(exp->tokens);

    /* the expression string, then the structure itself */
    free(exp->expr);
    free(exp);
}
/* helper to copy a point to temp area
 *
 * Copies temp_points[from_idx] into gps->points[to_idx] field by field.
 * When the stroke has vertex-group data (gps->dvert != NULL), the
 * corresponding deform weights are either duplicated (copy == true,
 * destination owns a new allocation) or moved by pointer (copy == false,
 * caller must ensure the source no longer frees them).
 */
static void gpencil_copy_move_point(bGPDstroke *gps,
                                    bGPDspoint *temp_points,
                                    MDeformVert *temp_dverts,
                                    int from_idx,
                                    int to_idx,
                                    const bool copy)
{
  bGPDspoint *pt = &temp_points[from_idx];
  bGPDspoint *pt_final = &gps->points[to_idx];

  /* coordinates plus all per-point attributes */
  copy_v3_v3(&pt_final->x, &pt->x);
  pt_final->pressure = pt->pressure;
  pt_final->strength = pt->strength;
  pt_final->time = pt->time;
  pt_final->flag = pt->flag;
  pt_final->uv_fac = pt->uv_fac;
  pt_final->uv_rot = pt->uv_rot;
  copy_v4_v4(pt_final->vert_color, pt->vert_color);

  if (gps->dvert != NULL) {
    MDeformVert *dvert = &temp_dverts[from_idx];
    MDeformVert *dvert_final = &gps->dvert[to_idx];
    dvert_final->totweight = dvert->totweight;
    /* duplicate or steal the weight array depending on `copy` */
    if (copy) {
      dvert_final->dw = MEM_dupallocN(dvert->dw);
    }
    else {
      dvert_final->dw = dvert->dw;
    }
  }
}
// Handle the case of known failure of transmit or receive to a gateway having failed void sensorGatewayRequestFailure(bool wasTX, const char *why) { if (wasTX) { traceSetID("to", gatewayAddress, LastRequestID); } else { traceSetID("fm", gatewayAddress, LastRequestID); } APP_PRINTF("%s %s\r\n", tracePeer(), why); atpGatewayMessageLost(); if (sensorResendToGateway()) { return; } freeMessageToSendBuffer(); memset(&wireReceivedCarrier, 0, sizeof(wireReceivedCarrier)); memset(&wireReceived, 0, sizeof(wireReceived)); schedRequestResponseTimeout(); sensorCoreIdle(); }
/* Application define function which creates the threads. */ void CyFxApplicationDefine ( void) { void *ptr = NULL; uint32_t retThrdCreate = CY_U3P_SUCCESS; ptr = CyU3PMemAlloc (CY_FX_BULKSRCSINK_THREAD_STACK); if ( CyU3PEventCreate (&appEvent) != 0 ) { while(1); } retThrdCreate = CyU3PThreadCreate (&bulkSrcSinkAppThread, "21:Bulk_src_sink", AppThread_Entry, 0, ptr, CY_FX_BULKSRCSINK_THREAD_STACK, CY_FX_BULKSRCSINK_THREAD_PRIORITY, CY_FX_BULKSRCSINK_THREAD_PRIORITY, CYU3P_NO_TIME_SLICE, CYU3P_AUTO_START ); if (retThrdCreate != 0) { while(1); } }
/*************************************************************
 * calculate_acceleration_mi_hour
 *
 * Computes the constant rate of acceleration in mi/hr^2 from
 * the initial and final velocities and the elapsed time
 * interval.  The interval is converted to hours by dividing
 * by MIN_PER_HOUR before applying a = dv / dt.
 *
 * Returns: the constant acceleration in mi/hr^2.
 *************************************************************/
double calculate_acceleration_mi_hour (double init_velocity, double final_velocity,
	double time_interval)
{
	/* convert the interval to hours, then apply a = dv / dt */
	double hours = time_interval / MIN_PER_HOUR;
	double delta_v = final_velocity - init_velocity;

	return delta_v / hours;
}
/*
 * Move filecaps structure to the new place and clear the old place.
 *
 * Ownership of whatever the filecaps structure holds is transferred to
 * 'dst' by plain structure assignment; 'src' is then zeroed so it can be
 * safely reused or released without double-freeing anything it pointed to.
 */
void
filecaps_move(struct filecaps *src, struct filecaps *dst)
{

	*dst = *src;			/* struct copy: transfer all fields */
	bzero(src, sizeof(*src));	/* leave the source empty */
}
/*
 * seldrop_locked
 *
 * Drop outstanding wait queue references set up during selscan(); drop the
 * outstanding per fileproc f_iocount() picked up during the selcount().
 *
 * Parameters:	p			Process performing the select
 *		ibits			Input bit vector of fd's
 *		nfd			Number of fd's
 *		lim			Limit to number of vector entries to
 *					consider, or -1 for "all"
 *		fromselcount		Non-zero if unwinding a partial
 *					selcount(); 'lim' then caps how many
 *					entries are dropped
 *		need_wakeup		Pointer to flag to set to do a wakeup
 *					if f_iocount on any descriptor goes to 0
 *
 * Returns:	0			Success
 *		EBADF			One or more fds in the bit vector
 *					were invalid, but the rest
 *					were successfully dropped
 *
 * Notes:	An fd may become bad while the proc_fdlock() is not held,
 *		if a multithreaded application closes the fd out from under
 *		the in progress select.  In this case, we still have to
 *		clean up after the set up on the remaining fds.
 */
static int
seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount)
{
	struct filedesc *fdp = p->p_fd;
	int msk, i, j, fd;
	u_int32_t bits;
	struct fileproc *fp;
	u_int32_t *iptr;
	u_int nw;
	int error = 0;
	int dropcount = 0;
	uthread_t uth = get_bsdthread_info(current_thread());

	*need_wakeup = 0;

	if (fdp == NULL) {
		return(EIO);
	}

	nw = howmany(nfd, NFDBITS);

	/* Walk all three interest sets (read/write/except). */
	for (msk = 0; msk < 3; msk++) {
		iptr = (u_int32_t *)&ibits[msk * nw];
		for (i = 0; i < nfd; i += NFDBITS) {
			bits = iptr[i/NFDBITS];
			/* visit each set bit within this word */
			while ((j = ffs(bits)) && (fd = i + --j) < nfd) {
				bits &= ~(1 << j);
				fp = fdp->fd_ofiles[fd];
				/*
				 * Stop once we have dropped as many entries
				 * as selcount() actually counted.
				 */
				if ((fromselcount != 0) && (++dropcount > lim))
					goto done;

				if (fp == NULL) {
					/* fd closed out from under us */
					error = EBADF;
					continue;
				}
				/*
				 * Only clear FP_INSELECT if this thread's
				 * wait queue set is the one registered on
				 * the fileproc.
				 */
				if ((fp->f_flags & FP_INSELECT) && (fp->f_waddr == (void *)uth->uu_wqset)) {
					fp->f_flags &= ~FP_INSELECT;
					fp->f_waddr = (void *)0;
				}

				fp->f_iocount--;
				if (fp->f_iocount < 0)
					panic("f_iocount overdecrement!");

				if (fp->f_iocount == 0) {
					/*
					 * The last iocount holder clears the
					 * select-conflict flag and wakes any
					 * thread draining iocounts.
					 */
					if (fp->f_flags & FP_SELCONFLICT)
						fp->f_flags &= ~FP_SELCONFLICT;
					if (p->p_fpdrainwait) {
						p->p_fpdrainwait = 0;
						*need_wakeup = 1;
					}
				}
			}
		}
	}
done:
	return (error);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	Copied from Linux Monitor (LiMon) - Networking.
 *
 *	Copyright 1994 - 2000 Neil Russell.
 *	(See License)
 *	Copyright 2000 Roland Borde
 *	Copyright 2000 Paolo Scaffardi
 *	Copyright 2000-2002 Wolfgang Denk, wd@denx.de
 */

#ifndef __ARP_H__
#define __ARP_H__

#include <common.h>

/* IP address an outstanding packet is waiting on ARP to resolve */
extern struct in_addr net_arp_wait_packet_ip;
/* MAC address of waiting packet's destination */
extern uchar *arp_wait_packet_ethaddr;
/* size of the packet held back while waiting for the ARP reply */
extern int arp_wait_tx_packet_size;
/* start time of the current ARP wait, for timeout accounting */
extern ulong arp_wait_timer_start;
/* number of ARP attempts made so far */
extern int arp_wait_try;
/* buffer holding the ARP request frame to transmit */
extern uchar *arp_tx_packet;

void arp_init(void);
/* Send an ARP request for the currently awaited IP */
void arp_request(void);
/* Send a raw ARP request with explicit source/target addressing */
void arp_raw_request(struct in_addr source_ip, const uchar *targetEther,
		     struct in_addr target_ip);
/* Check/handle ARP wait expiry; see implementation for return semantics */
int arp_timeout_check(void);
/* Process an incoming ARP packet */
void arp_receive(struct ethernet_hdr *et, struct ip_udp_hdr *ip, int len);

#endif /* __ARP_H__ */
/* * Searching for 'wondering' chars within the code file. */ void searchUndefChars() { char lineCpy[LINE_SIZE], temp[2]; size_t i = 0; boolean IN_STRING = FALSE; strcpy(lineCpy, line); for (i = 0; lineCpy[i]; i++) { if (isspace(lineCpy[i])) continue; if (lineCpy[i] == '\"' && !IN_STRING ) { IN_STRING = TRUE; } else if (lineCpy[i] == '\"' && IN_STRING) { IN_STRING = FALSE; } if (IN_STRING) continue; if ((!isalnum(lineCpy[i])) && lineCpy[i] != COMMA && lineCpy[i] != O_BRACKETS && lineCpy[i] != C_BRACKETS && lineCpy[i] != '.' && lineCpy[i] != '\"' && lineCpy[i] != '#' && lineCpy[i] != ISLABEL && lineCpy[i] != '-' && lineCpy[i] != '+' && lineCpy[i] != ISCOMMENT) { strcpy(temp, (lineCpy + i)); removeSpaces(temp); printErrorWithComment(undef_char, temp); } } }
/* microprotocols.c - definitions for minimalist and non-validating protocols * * Copyright (C) 2003-2004 Federico Di Gregorio <fog@debian.org> * * This file is part of psycopg and was adapted for pysqlite. Federico Di * Gregorio gave the permission to use it within pysqlite under the following * license: * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ #ifndef PSYCOPG_MICROPROTOCOLS_H #define PSYCOPG_MICROPROTOCOLS_H 1 #define PY_SSIZE_T_CLEAN #include <Python.h> /** exported functions **/ /* used by module.c to init the microprotocols system */ extern int pysqlite_microprotocols_init(PyObject *module); extern int pysqlite_microprotocols_add(pysqlite_state *state, PyTypeObject *type, PyObject *proto, PyObject *cast); extern PyObject *pysqlite_microprotocols_adapt(pysqlite_state *state, PyObject *obj, PyObject *proto, PyObject *alt); #endif /* !defined(PSYCOPG_MICROPROTOCOLS_H) */
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* NOTE(review): the guard reads __CPP_H__ -- almost certainly a typo for
 * __CCP_H__.  Renaming it is a code change that needs a check for other
 * users of the macro, so it is left as-is here. */
#ifndef __CPP_H__
#define __CPP_H__

#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <crypto/aes.h>
#include <crypto/sha.h>


struct ccp_device;
struct ccp_cmd;

#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
	defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Refer to the ccp_cmd struct below for required fields.
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd);

#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */

/* Stub used when the CCP driver is not built in: report "no device". */
static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	return -ENODEV;
}

#endif /* CONFIG_CRYPTO_DEV_CCP_DD */


/***** AES engine *****/
/**
 * ccp_aes_type - AES key size
 *
 * @CCP_AES_TYPE_128: 128-bit key
 * @CCP_AES_TYPE_192: 192-bit key
 * @CCP_AES_TYPE_256: 256-bit key
 */
enum ccp_aes_type {
	CCP_AES_TYPE_128 = 0,
	CCP_AES_TYPE_192,
	CCP_AES_TYPE_256,
	CCP_AES_TYPE__LAST,
};

/**
 * ccp_aes_mode - AES operation mode
 *
 * @CCP_AES_MODE_ECB: ECB mode
 * @CCP_AES_MODE_CBC: CBC mode
 * @CCP_AES_MODE_OFB: OFB mode
 * @CCP_AES_MODE_CFB: CFB mode
 * @CCP_AES_MODE_CTR: CTR mode
 * @CCP_AES_MODE_CMAC: CMAC mode
 */
enum ccp_aes_mode {
	CCP_AES_MODE_ECB = 0,
	CCP_AES_MODE_CBC,
	CCP_AES_MODE_OFB,
	CCP_AES_MODE_CFB,
	CCP_AES_MODE_CTR,
	CCP_AES_MODE_CMAC,
	CCP_AES_MODE__LAST,
};

/**
 * ccp_aes_action - AES operation
 *
 * (This kernel-doc header previously repeated "ccp_aes_mode" by mistake.)
 *
 * @CCP_AES_ACTION_DECRYPT: AES decrypt operation
 * @CCP_AES_ACTION_ENCRYPT: AES encrypt operation
 */
enum ccp_aes_action {
	CCP_AES_ACTION_DECRYPT = 0,
	CCP_AES_ACTION_ENCRYPT,
	CCP_AES_ACTION__LAST,
};

/**
 * struct ccp_aes_engine - CCP AES operation
 * @type: AES operation key size
 * @mode: AES operation mode
 * @action: AES operation (decrypt/encrypt)
 * @key: key to be used for this AES operation
 * @key_len: length in bytes of key
 * @iv: IV to be used for this AES operation
 * @iv_len: length in bytes of iv
 * @src: data to be used for this operation
 * @dst: data produced by this operation
 * @src_len: length in bytes of data used for this operation
 * @cmac_final: indicates final operation when running in CMAC mode
 * @cmac_key: K1/K2 key used in final CMAC operation
 * @cmac_key_len: length in bytes of cmac_key
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 *   - type, mode, action, key, key_len, src, dst, src_len
 *   - iv, iv_len for any mode other than ECB
 *   - cmac_final for CMAC mode
 *   - cmac_key, cmac_key_len for CMAC mode if cmac_final is non-zero
 *
 * The iv variable is used as both input and output. On completion of the
 * AES operation the new IV overwrites the old IV.
 */
struct ccp_aes_engine {
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;
	enum ccp_aes_action action;

	struct scatterlist *key;
	u32 key_len;		/* In bytes */

	struct scatterlist *iv;
	u32 iv_len;		/* In bytes */

	struct scatterlist *src, *dst;
	u64 src_len;		/* In bytes */

	u32 cmac_final;		/* Indicates final cmac cmd */
	struct scatterlist *cmac_key;	/* K1/K2 cmac key required for
					 * final cmac cmd */
	u32 cmac_key_len;	/* In bytes */
};

/***** XTS-AES engine *****/
/**
 * ccp_xts_aes_unit_size - XTS unit size
 *
 * @CCP_XTS_AES_UNIT_SIZE_16: Unit size of 16 bytes
 * @CCP_XTS_AES_UNIT_SIZE_512: Unit size of 512 bytes
 * @CCP_XTS_AES_UNIT_SIZE_1024: Unit size of 1024 bytes
 * @CCP_XTS_AES_UNIT_SIZE_2048: Unit size of 2048 bytes
 * @CCP_XTS_AES_UNIT_SIZE_4096: Unit size of 4096 bytes
 */
enum ccp_xts_aes_unit_size {
	CCP_XTS_AES_UNIT_SIZE_16 = 0,
	CCP_XTS_AES_UNIT_SIZE_512,
	CCP_XTS_AES_UNIT_SIZE_1024,
	CCP_XTS_AES_UNIT_SIZE_2048,
	CCP_XTS_AES_UNIT_SIZE_4096,
	CCP_XTS_AES_UNIT_SIZE__LAST,
};

/**
 * struct ccp_xts_aes_engine - CCP XTS AES operation
 * @action: AES operation (decrypt/encrypt)
 * @unit_size: unit size of the XTS operation
 * @key: key to be used for this XTS AES operation
 * @key_len: length in bytes of key
 * @iv: IV to be used for this XTS AES operation
 * @iv_len: length in bytes of iv
 * @src: data to be used for this operation
 * @dst: data produced by this operation
 * @src_len: length in bytes of data used for this operation
 * @final: indicates final XTS operation
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 *   - action, unit_size, key, key_len, iv, iv_len, src, dst, src_len, final
 *
 * The iv variable is used as both input and output. On completion of the
 * AES operation the new IV overwrites the old IV.
 */
struct ccp_xts_aes_engine {
	enum ccp_aes_action action;
	enum ccp_xts_aes_unit_size unit_size;

	struct scatterlist *key;
	u32 key_len;		/* In bytes */

	struct scatterlist *iv;
	u32 iv_len;		/* In bytes */

	struct scatterlist *src, *dst;
	u64 src_len;		/* In bytes */

	u32 final;
};

/***** SHA engine *****/
#define CCP_SHA_BLOCKSIZE               SHA256_BLOCK_SIZE
#define CCP_SHA_CTXSIZE                 SHA256_DIGEST_SIZE

/**
 * ccp_sha_type - type of SHA operation
 *
 * @CCP_SHA_TYPE_1: SHA-1 operation
 * @CCP_SHA_TYPE_224: SHA-224 operation
 * @CCP_SHA_TYPE_256: SHA-256 operation
 */
enum ccp_sha_type {
	CCP_SHA_TYPE_1 = 1,
	CCP_SHA_TYPE_224,
	CCP_SHA_TYPE_256,
	CCP_SHA_TYPE__LAST,
};

/**
 * struct ccp_sha_engine - CCP SHA operation
 * @type: Type of SHA operation
 * @ctx: current hash value
 * @ctx_len: length in bytes of hash value
 * @src: data to be used for this operation
 * @src_len: length in bytes of data used for this operation
 * @opad: data to be used for final HMAC operation
 * @opad_len: length in bytes of data used for final HMAC operation
 * @first: indicates first SHA operation
 * @final: indicates final SHA operation
 * @msg_bits: total length of the message in bits used in final SHA operation
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 *   - type, ctx, ctx_len, src, src_len, final
 *   - msg_bits if final is non-zero
 *
 * The ctx variable is used as both input and output. On completion of the
 * SHA operation the new hash value overwrites the old hash value.
 */
struct ccp_sha_engine {
	enum ccp_sha_type type;

	struct scatterlist *ctx;
	u32 ctx_len;		/* In bytes */

	struct scatterlist *src;
	u64 src_len;		/* In bytes */

	struct scatterlist *opad;
	u32 opad_len;		/* In bytes */

	u32 first;		/* Indicates first sha cmd */
	u32 final;		/* Indicates final sha cmd */
	u64 msg_bits;		/* Message length in bits required for
				 * final sha cmd */
};

/***** RSA engine *****/
/**
 * struct ccp_rsa_engine - CCP RSA operation
 * @key_size: length in bits of RSA key
 * @exp: RSA exponent
 * @exp_len: length in bytes of exponent
 * @mod: RSA modulus
 * @mod_len: length in bytes of modulus
 * @src: data to be used for this operation
 * @dst: data produced by this operation
 * @src_len: length in bytes of data used for this operation
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 *   - key_size, exp, exp_len, mod, mod_len, src, dst, src_len
 */
struct ccp_rsa_engine {
	u32 key_size;		/* In bits */

	struct scatterlist *exp;
	u32 exp_len;		/* In bytes */

	struct scatterlist *mod;
	u32 mod_len;		/* In bytes */

	struct scatterlist *src, *dst;
	u32 src_len;		/* In bytes */
};

/***** Passthru engine *****/
/**
 * ccp_passthru_bitwise - type of bitwise passthru operation
 *
 * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
 * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
 * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
 * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
 * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
 */
enum ccp_passthru_bitwise {
	CCP_PASSTHRU_BITWISE_NOOP = 0,
	CCP_PASSTHRU_BITWISE_AND,
	CCP_PASSTHRU_BITWISE_OR,
	CCP_PASSTHRU_BITWISE_XOR,
	CCP_PASSTHRU_BITWISE_MASK,
	CCP_PASSTHRU_BITWISE__LAST,
};

/**
 * ccp_passthru_byteswap - type of byteswap passthru operation
 *
 * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
 * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
 * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
 */
enum ccp_passthru_byteswap {
	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
	CCP_PASSTHRU_BYTESWAP_32BIT,
	CCP_PASSTHRU_BYTESWAP_256BIT,
	CCP_PASSTHRU_BYTESWAP__LAST,
};

/**
 * struct ccp_passthru_engine - CCP pass-through operation
 * @bit_mod: bitwise operation to perform
 * @byte_swap: byteswap operation to perform
 * @mask: mask to be applied to data
 * @mask_len: length in bytes of mask
 * @src: data to be used for this operation
 * @dst: data produced by this operation
 * @src_len: length in bytes of data used for this operation
 * @final: indicate final pass-through operation
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 *   - bit_mod, byte_swap, src, dst, src_len
 *   - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP
 */
struct ccp_passthru_engine {
	enum ccp_passthru_bitwise bit_mod;
	enum ccp_passthru_byteswap byte_swap;

	struct scatterlist *mask;
	u32 mask_len;		/* In bytes */

	struct scatterlist *src, *dst;
	u64 src_len;		/* In bytes */

	u32 final;
};

/***** ECC engine *****/
#define CCP_ECC_MODULUS_BYTES	48	/* 384-bits */
#define CCP_ECC_MAX_OPERANDS	6
#define CCP_ECC_MAX_OUTPUTS	3

/**
 * ccp_ecc_function - type of ECC function
 *
 * @CCP_ECC_FUNCTION_MMUL_384BIT: 384-bit modular multiplication
 * @CCP_ECC_FUNCTION_MADD_384BIT: 384-bit modular addition
 * @CCP_ECC_FUNCTION_MINV_384BIT: 384-bit multiplicative inverse
 * @CCP_ECC_FUNCTION_PADD_384BIT: 384-bit point addition
 * @CCP_ECC_FUNCTION_PMUL_384BIT: 384-bit point multiplication
 * @CCP_ECC_FUNCTION_PDBL_384BIT: 384-bit point doubling
 */
enum ccp_ecc_function {
	CCP_ECC_FUNCTION_MMUL_384BIT = 0,
	CCP_ECC_FUNCTION_MADD_384BIT,
	CCP_ECC_FUNCTION_MINV_384BIT,
	CCP_ECC_FUNCTION_PADD_384BIT,
	CCP_ECC_FUNCTION_PMUL_384BIT,
	CCP_ECC_FUNCTION_PDBL_384BIT,
};

/**
 * struct ccp_ecc_modular_math - CCP ECC modular math parameters
 * @operand_1: first operand for the modular math operation
 * @operand_1_len: length of the first operand
 * @operand_2: second operand for the modular math operation
 *	       (not used for CCP_ECC_FUNCTION_MINV_384BIT)
 * @operand_2_len: length of the second operand
 *	       (not used for CCP_ECC_FUNCTION_MINV_384BIT)
 * @result: result of the modular math operation
 * @result_len: length of the supplied result buffer
 */
struct ccp_ecc_modular_math {
	struct scatterlist *operand_1;
	unsigned int operand_1_len;	/* In bytes */

	struct scatterlist *operand_2;
	unsigned int operand_2_len;	/* In bytes */

	struct scatterlist *result;
	unsigned int result_len;	/* In bytes */
};

/**
 * struct ccp_ecc_point - CCP ECC point definition
 * @x: the x coordinate of the ECC point
 * @x_len: the length of the x coordinate
 * @y: the y coordinate of the ECC point
 * @y_len: the length of the y coordinate
 */
struct ccp_ecc_point {
	struct scatterlist *x;
	unsigned int x_len;	/* In bytes */

	struct scatterlist *y;
	unsigned int y_len;	/* In bytes */
};

/**
 * struct ccp_ecc_point_math - CCP ECC point math parameters
 * @point_1: the first point of the ECC point math operation
 * @point_2: the second point of the ECC point math operation
 *	     (only used for CCP_ECC_FUNCTION_PADD_384BIT)
 * @domain_a: the a parameter of the ECC curve
 * @domain_a_len: the length of the a parameter
 * @scalar: the scalar parameter for the point match operation
 *	    (only used for CCP_ECC_FUNCTION_PMUL_384BIT)
 * @scalar_len: the length of the scalar parameter
 *	    (only used for CCP_ECC_FUNCTION_PMUL_384BIT)
 * @result: the point resulting from the point math operation
 */
struct ccp_ecc_point_math {
	struct ccp_ecc_point point_1;
	struct ccp_ecc_point point_2;

	struct scatterlist *domain_a;
	unsigned int domain_a_len;	/* In bytes */

	struct scatterlist *scalar;
	unsigned int scalar_len;	/* In bytes */

	struct ccp_ecc_point result;
};

/**
 * struct ccp_ecc_engine - CCP ECC operation
 * @function: ECC function to perform
 * @mod: ECC modulus
 * @mod_len: length in bytes of modulus
 * @mm: modular math parameters
 * @pm: point math parameters
 * @ecc_result: result of the ECC operation
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 *   - function, mod, mod_len
 *   - either u.mm or u.pm, as selected by @function, plus ecc_result
 *     (the previous comment listed operand/output fields that do not
 *     exist in this struct)
 */
struct ccp_ecc_engine {
	enum ccp_ecc_function function;

	struct scatterlist *mod;
	u32 mod_len;		/* In bytes */

	union {
		struct ccp_ecc_modular_math mm;
		struct ccp_ecc_point_math pm;
	} u;

	u16 ecc_result;
};


/**
 * ccp_engine - CCP operation identifiers
 *
 * @CCP_ENGINE_AES: AES operation
 * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
 * @CCP_ENGINE_RSVD1: unused
 * @CCP_ENGINE_SHA: SHA operation
 * @CCP_ENGINE_RSA: RSA operation
 * @CCP_ENGINE_PASSTHRU: pass-through operation
 * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
 * @CCP_ENGINE_ECC: ECC operation
 */
enum ccp_engine {
	CCP_ENGINE_AES = 0,
	CCP_ENGINE_XTS_AES_128,
	CCP_ENGINE_RSVD1,
	CCP_ENGINE_SHA,
	CCP_ENGINE_RSA,
	CCP_ENGINE_PASSTHRU,
	CCP_ENGINE_ZLIB_DECOMPRESS,
	CCP_ENGINE_ECC,
	CCP_ENGINE__LAST,
};

/* Flag values for flags member of ccp_cmd */
#define CCP_CMD_MAY_BACKLOG	0x00000001

/**
 * struct ccp_cmd - CCP operation request
 * @entry: list element (ccp driver use only)
 * @work: work element used for callbacks (ccp driver use only)
 * @ccp: CCP device to be run on (ccp driver use only)
 * @ret: operation return code (ccp driver use only)
 * @flags: cmd processing flags
 * @engine: CCP operation to perform
 * @engine_error: CCP engine return code
 * @u: engine specific structures, refer to the engine structs above
 * @callback: operation completion callback function
 * @data: parameter value to be supplied to the callback function
 *
 * Variables required to be set when calling ccp_enqueue_cmd():
 *   - engine, callback
 *   - See the operation structures above for what is required for each
 *     operation.
 */
struct ccp_cmd {
	/* The list_head, work_struct, ccp and ret variables are for use
	 * by the CCP driver only.
	 */
	struct list_head entry;
	struct work_struct work;
	struct ccp_device *ccp;
	int ret;

	u32 flags;

	enum ccp_engine engine;
	u32 engine_error;

	union {
		struct ccp_aes_engine aes;
		struct ccp_xts_aes_engine xts;
		struct ccp_sha_engine sha;
		struct ccp_rsa_engine rsa;
		struct ccp_passthru_engine passthru;
		struct ccp_ecc_engine ecc;
	} u;

	/* Completion callback support */
	void (*callback)(void *data, int err);
	void *data;
};

#endif
#include<stdio.h>

/* Reads an integer radius from stdin and prints the circle's
 * circumference, 2 * pi * r, using the same 4-digit approximation of
 * pi (3.1416) as before.  (The dead, commented-out C++ experiment that
 * used to live here has been removed.) */
int main()
{
    int radius;
    double circumference;

    scanf("%d", &radius);
    circumference = 2 * 3.1416 * radius;
    printf("%f\n", circumference);
    return 0;
}
/****************************************************************************/
/* General menu function, see uifc.h for details.                           */
/* Text-mode fallback: prints the options, reads a line from the user and   */
/* loops until a selection or command is made.  Returns the selected index, */
/* an index OR'd with an MSK_* command bit, or -1 on quit/escape.           */
/* NOTE(review): left/top/width/bar appear unused in this implementation -- */
/* presumably kept for interface compatibility with the windowed variant.   */
/****************************************************************************/
int ulist(int mode, int left, int top, int width, int *cur, int *bar
	, char *title, char **option)
{
	char str[128];
	int i,opts;
	int optnumlen;
	int yesno=0;
	int lines;

	/* Count options: a NULL or empty string terminates the list. */
	for(opts=0;opts<MAX_OPTS;opts++)
		if(option[opts]==NULL || option[opts][0]==0)
			break;

	/* Clamp the current selection into [0, opts-1]. */
	if((*cur)>=opts)
		(*cur)=opts-1;
	if((*cur)<0)
		(*cur)=0;

	/* Width of the option-number column. */
	if(opts>999)
		optnumlen=4;
	else if(opts>99)
		optnumlen=3;
	else if(opts>9)
		optnumlen=2;
	else
		optnumlen=1;

	while(1) {
		/* Special-case a two-option Yes/No menu: single-line prompt. */
		if(opts==2 && !stricmp(option[0],"Yes") && !stricmp(option[1],"No")) {
			yesno=1;
			printf("%s? ",title);
		} else {
			printf("\n[%s]\n",title);
			lines=2;
			for(i=0;i<opts;i++) {
				printf("%*d: %s\n",optnumlen,i+1,option[i]);
				lines++;
				/* Pause every screenful; 'N' aborts the listing. */
				if(!(lines%api->scrn_len)) {
					printf("More? ");
					str[0]=0;
					getstr(str,sizeof(str)-1);
					if(toupper(*str)=='N')
						break;
				}
			}
			/* Advertise the extra commands enabled by 'mode'. */
			str[0]=0;
			if(mode&WIN_GET)
				strcat(str,", Copy");
			if(mode&WIN_PUT)
				strcat(str,", Paste");
			if(mode&WIN_INS)
				strcat(str,", Add");
			if(mode&WIN_DEL)
				strcat(str,", Delete");
			printf("\nWhich (Help%s or Quit): ",str);
		}
		str[0]=0;
		getstr(str,sizeof(str)-1);
		truncsp(str);

		/* A plain number selects that option (input is 1-based). */
		i=atoi(str);
		if(i>0 && i<=opts) {
			*cur=--i;
			return(*cur);
		}
		/* Otherwise the first char is a command letter, optionally
		   followed by an option number. */
		i=atoi(str+1);
		switch(toupper(*str)) {
			case 0:
			case ESC:
			case 'Q':
				printf("Quit\n");
				return(-1);
			case 'Y':
				if(!yesno)
					break;
				printf("Yes\n");
				return(0);
			case 'N':
				if(!yesno)
					break;
				printf("No\n");
				return(1);
			case 'H':
			case '?':
				printf("Help\n");
				help();
				break;
			case 'A':	/* Add (insert) */
				if(!(mode&WIN_INS))
					break;
				if(!opts)
					return(MSK_INS);
				if(i>0 && i<=opts+1)
					return((i-1)|MSK_INS);
				return(which("Add before",opts+1)|MSK_INS);
			case 'D':	/* Delete */
				if(!(mode&WIN_DEL))
					break;
				if(!opts)
					break;
				if(i>0 && i<=opts)
					return((i-1)|MSK_DEL);
				if(opts==1)
					return(MSK_DEL);
				return(which("Delete",opts)|MSK_DEL);
			case 'C':	/* Copy */
				if(!(mode&WIN_GET))
					break;
				if(!opts)
					break;
				if(i>0 && i<=opts)
					return((i-1)|MSK_GET);
				if(opts==1)
					return(MSK_GET);
				return(which("Copy",opts)|MSK_GET);
			case 'P':	/* Paste */
				if(!(mode&WIN_PUT))
					break;
				if(!opts)
					break;
				if(i>0 && i<=opts)
					return((i-1)|MSK_PUT);
				if(opts==1)
					return(MSK_PUT);
				return(which("Paste",opts)|MSK_PUT);
		}
	}
}
/*
 * This function should do the parameter checking and tensor shape inference.
 *
 * "gather" stacks N inputs (arg names starting with "src") that all share
 * src1's dtype and shape into a new output "dst" of shape
 * [N, src1->dims...].  Parameter checks use the ln_opck_* macros, which
 * abort pre-run on failure.  A priv_s holding the collected source entries
 * and the created dst entry is stashed in op_arg->priv for run/post-run.
 */
static void gather_pre_run(ln_op_arg *op_arg)
{
    ln_tensor_entry *te;
    ln_tensor_list_entry *tle;
    ln_tensor_list_entry *src1_list_entry;
    ln_tensor_list_entry *dst_list_entry;
    ln_tensor_entry *src1_entry;
    ln_tensor_entry *dst_entry;
    tl_tensor *src1;
    tl_tensor *dst;
    ln_list *src_entries;
    int src_n;

    /* "src1" must exist and be defined; it fixes the dtype and shape every
       other src tensor must match. */
    src1_list_entry = ln_tensor_list_find_by_arg_name(op_arg->tensors_in,
                                                      "src1");
    ln_opck_tensor_in_exist(src1_list_entry, "src1");
    ln_opck_tensor_defined(src1_list_entry, src1_list_entry->name);
    src1_entry = ln_tensor_table_find(op_arg->tensor_table,
                                      src1_list_entry->name);
    src1 = src1_entry->tensor;

    /* Collect every input whose arg name starts with "src" and verify it
       agrees with src1 in dtype and shape. */
    src_n = 0;
    src_entries = NULL;
    LN_LIST_FOREACH(tle, op_arg->tensors_in) {
        if (!ln_streqn(tle->arg_name, "src", 3))
            continue;
        te = ln_tensor_table_find(op_arg->tensor_table, tle->name);
        ln_opck_tensor_defined(te, tle->name);
        ln_opck_tensor_issametype(te, src1_entry);
        ln_opck_tensor_issameshape(te, src1_entry);
        src_entries = ln_list_append(src_entries, te);
        src_n++;
    }

    /* FIX: the original code performed this exact "dst" lookup twice (once
       before the loop above, once here); the first was dead and removed. */
    dst_list_entry = ln_tensor_list_find_by_arg_name(op_arg->tensors_out,
                                                     "dst");
    ln_opck_tensor_out_exist(dst_list_entry, "dst");
    dst_entry = ln_tensor_table_find(op_arg->tensor_table,
                                     dst_list_entry->name);
    ln_opck_tensor_not_defined(dst_entry, dst_list_entry->name);

    /* Infer dst's shape: one extra leading axis of length src_n. */
    {
        int dst_ndim = src1->ndim + 1;
        int *dst_dims = ln_alloc(sizeof(int) * dst_ndim);

        dst_dims[0] = src_n;
        memmove(&dst_dims[1], src1->dims, sizeof(int) * src1->ndim);
        dst = tl_tensor_create(NULL, dst_ndim, dst_dims, src1->dtype);
        dst_entry = ln_tensor_entry_create(dst_list_entry->name, dst);
        dst_entry->offset = dst_list_entry->offset;
        ln_tensor_entry_set_creater(dst_entry, op_arg->name);
        dst_entry->mtype = LN_MEM_NONE;
        ln_tensor_table_insert(op_arg->tensor_table, dst_entry);
        ln_free(dst_dims);
    }

    /* Stash the per-op private data for the run/post-run stages. */
    {
        struct priv_s *priv = ln_alloc(sizeof(struct priv_s));

        priv->src_entries = src_entries;
        priv->dst_entry = dst_entry;
        op_arg->priv = priv;
    }
}
/*
 *The TMIO controller combines two 8-bit data bytes into one 16-bit
 *word. This function separates them so nand_base.c works as expected,
 *especially its NAND_CMD_READID routines.
 *
 *To prevent stale data from being read, tmio_nand_hwcontrol() clears
 *tmio->read_good.
 */
static u_char tmio_nand_read_byte(struct mtd_info *mtd)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	unsigned int data;

	/* A previous 16-bit read left its high byte cached in tmio->read:
	 * hand that back first (post-decrement consumes the credit). */
	if (tmio->read_good--)
		return tmio->read;

	/* Fetch a fresh 16-bit word: cache the high byte for the next call
	 * and return the low byte now (implicit truncation to u_char). */
	data = tmio_ioread16(tmio->fcr + FCR_DATA);
	tmio->read = data >> 8;
	return data;
}
# include <stdio.h>
# include <stdlib.h>

/* Return the smaller of two 64-bit values. */
static long long int min (long long int a,long long int b){
	if (a<b)
		return a;
	else
		return b;
}

/*
 * Absolute value of an int.
 *
 * FIX: this was previously named `abs`, redefining the function declared
 * by <stdlib.h> (undefined behavior).  Renamed and made static; behavior
 * is unchanged for all callers in this file.
 */
static int absdiff(int n){
	if(n<=0)
		return (-1*n);
	else
		return n;
}

/*
 * Frog-jump DP: from stone i you may jump to any stone i+1..i+k at cost
 * |cost[i]-cost[j]|.  Reads n, k and the n stone costs from stdin and
 * prints the minimum total cost to reach stone n-1 from stone 0.
 */
int main(){
	int n,k,i,j;
	int *cost;
	long long int *res;

	/* FIX: scanf and malloc results were previously unchecked. */
	if (scanf ("%d %d",&n,&k) != 2)
		return 1;

	cost = malloc(n*sizeof(int));
	res = malloc(n*sizeof(long long int));
	if (cost == NULL || res == NULL) {
		free(cost);
		free(res);
		return 1;
	}

	for(i=0;i<n;i++)
		scanf ("%d",&cost[i]);

	/* res[i] = minimal cost to reach stone i; -1 marks "not reached". */
	for(i=0;i<n;i++)
		res[i]=-1;
	res[0]=0;

	for(i=0;i<n;i++){
		for(j=i+1;j<min(n,i+k+1);j++){
			long long int cand = res[i]+absdiff(cost[i]-cost[j]);
			if(res[j]==-1 || cand<res[j])
				res[j]=cand;
		}
	}

	printf ("%lld\n",res[n-1]);

	/* FIX: the allocations were previously leaked. */
	free(cost);
	free(res);
	return 0;
}
/* Linux driver for NAND Flash Translation Layer */
/* (c) 1999 Machine Vision Holdings, Inc. */
/* Author: David Woodhouse <dwmw2@infradead.org> */
/* $Id: nftlcore.c,v 1.98 2005/11/07 11:14:21 gleixner Exp $ */

/*
 The contents of this file are distributed under the GNU General
 Public License version 2. The author places no additional
 restrictions of any kind on it.
 */

#define PRERELEASE

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/kmod.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nftl.h>
#include <linux/mtd/blktrans.h>

/* maximum number of loops while examining next block, to have a
   chance to detect consistency problems (they should never happen
   because of the checks done in the mounting */
#define MAX_LOOPS 10000

/*
 * Probe callback: claim an MTD device as an NFTL partition, mount it and
 * register it as a block-translation device with a fabricated CHS geometry.
 */
static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct NFTLrecord *nftl;
	unsigned long temp;

	/* NFTL only lives on NAND flash named "DiskOnChip..." */
	if (mtd->type != MTD_NANDFLASH)
		return;
	/* OK, this is moderately ugly.  But probably safe.  Alternatives? */
	if (memcmp(mtd->name, "DiskOnChip", 10))
		return;

	if (!mtd->block_isbad) {
		printk(KERN_ERR
"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
		return;
	}

	DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);

	nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);

	if (!nftl) {
		printk(KERN_WARNING "NFTL: out of memory for data structures\n");
		return;
	}

	nftl->mbd.mtd = mtd;
	nftl->mbd.devnum = -1;
	nftl->mbd.tr = tr;

	if (NFTL_mount(nftl) < 0) {
		printk(KERN_WARNING "NFTL: could not mount device\n");
		kfree(nftl);
		return;
	}

	/* OK, it's a new one. Set up all the data structures. */

	/* Calculate geometry: start from 1024 cylinders / 16 heads and
	   adjust heads, then cylinders, until C*H*S covers mbd.size. */
	nftl->cylinders = 1024;
	nftl->heads = 16;

	temp = nftl->cylinders * nftl->heads;
	nftl->sectors = nftl->mbd.size / temp;
	if (nftl->mbd.size % temp) {
		nftl->sectors++;
		temp = nftl->cylinders * nftl->sectors;
		nftl->heads = nftl->mbd.size / temp;

		if (nftl->mbd.size % temp) {
			nftl->heads++;
			temp = nftl->heads * nftl->sectors;
			nftl->cylinders = nftl->mbd.size / temp;
		}
	}

	if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) {
		/* Oh no we don't have
		   mbd.size == heads * cylinders * sectors */
		printk(KERN_WARNING "NFTL: cannot calculate a geometry to "
		       "match size of 0x%lx.\n", nftl->mbd.size);
		printk(KERN_WARNING "NFTL: using C:%d H:%d S:%d "
			"(== 0x%lx sects)\n",
			nftl->cylinders, nftl->heads , nftl->sectors,
			(long)nftl->cylinders * (long)nftl->heads *
				(long)nftl->sectors );
	}

	if (add_mtd_blktrans_dev(&nftl->mbd)) {
		/* Registration failed: free the tables NFTL_mount built. */
		kfree(nftl->ReplUnitTable);
		kfree(nftl->EUNtable);
		kfree(nftl);
		return;
	}
#ifdef PSYCHO_DEBUG
	printk(KERN_INFO "NFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
#endif
}

/* Teardown callback: unregister the device and free NFTL bookkeeping. */
static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct NFTLrecord *nftl = (void *)dev;

	DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum);

	del_mtd_blktrans_dev(dev);
	kfree(nftl->ReplUnitTable);
	kfree(nftl->EUNtable);
	kfree(nftl);
}

/*
 * Read oob data from flash
 */
int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
		  size_t *retlen, uint8_t *buf)
{
	struct mtd_oob_ops ops;
	int res;

	/* split 'offs' into a page-aligned address plus an OOB offset */
	ops.mode = MTD_OOB_PLACE;
	ops.ooboffs = offs & (mtd->writesize - 1);
	ops.ooblen = len;
	ops.oobbuf = buf;
	ops.datbuf = NULL;

	res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
	*retlen = ops.oobretlen;
	return res;
}

/*
 * Write oob data to flash
 */
int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
		   size_t *retlen, uint8_t *buf)
{
	struct mtd_oob_ops ops;
	int res;

	ops.mode = MTD_OOB_PLACE;
	ops.ooboffs = offs & (mtd->writesize - 1);
	ops.ooblen = len;
	ops.oobbuf = buf;
	ops.datbuf = NULL;

	res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
	*retlen = ops.oobretlen;
	return res;
}

#ifdef CONFIG_NFTL_RW

/*
 * Write data and oob to flash
 */
static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
		      size_t *retlen, uint8_t *buf, uint8_t *oob)
{
	struct mtd_oob_ops ops;
	int res;

	ops.mode = MTD_OOB_PLACE;
	ops.ooboffs = offs;
	ops.ooblen = mtd->oobsize;
	ops.oobbuf = oob;
	ops.datbuf = buf;
	ops.len = len;

	res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
	*retlen = ops.retlen;
	return res;
}

/* Actual NFTL access routines */
/* NFTL_findfreeblock: Find a free Erase Unit on the NFTL partition.
 * This function is used when the given Virtual Unit Chain needs another
 * block (the original comment broke off mid-sentence here).
 */
static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
{
	/* For a given Virtual Unit Chain: find or create a free block and
	   add it to the chain */
	/* We're passed the number of the last EUN in the chain, to save us from
	   having to look it up again */
	u16 pot = nftl->LastFreeEUN;
	int silly = nftl->nb_blocks;

	/* Normally, we force a fold to happen before we run out of free blocks completely */
	if (!desperate && nftl->numfreeEUNs < 2) {
		DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
		return 0xffff;
	}

	/* Scan for a free block */
	do {
		if (nftl->ReplUnitTable[pot] == BLOCK_FREE) {
			nftl->LastFreeEUN = pot;
			nftl->numfreeEUNs--;
			return pot;
		}

		/* This will probably point to the MediaHdr unit itself,
		   right at the beginning of the partition. But that unit
		   (and the backup unit too) should have the UCI set
		   up so that it's not selected for overwriting */
		if (++pot > nftl->lastEUN)
			pot = le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN);

		if (!silly--) {
			printk("Argh! No free blocks found! LastFreeEUN = %d, "
			       "FirstEUN = %d\n", nftl->LastFreeEUN,
			       le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
			return 0xffff;
		}
	} while (pot != nftl->LastFreeEUN);

	return 0xffff;
}

static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
{
	struct mtd_info *mtd = nftl->mbd.mtd;
	u16 BlockMap[MAX_SECTORS_PER_UNIT];
	unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
	unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
	unsigned int thisEUN;
	int block;
	int silly;
	unsigned int targetEUN;
	struct nftl_oob oob;
	int inplace = 1;
	size_t retlen;

	memset(BlockMap, 0xff, sizeof(BlockMap));
	memset(BlockFreeFound, 0, sizeof(BlockFreeFound));

	thisEUN = nftl->EUNtable[thisVUC];

	if (thisEUN == BLOCK_NIL) {
		printk(KERN_WARNING "Trying to fold non-existent "
		       "Virtual Unit Chain %d!\n", thisVUC);
		return BLOCK_NIL;
	}

	/* Scan to find the Erase Unit which holds the actual data for each
	   512-byte block within the Chain.
	*/
	silly = MAX_LOOPS;
	targetEUN = BLOCK_NIL;
	while (thisEUN <= nftl->lastEUN ) {
		unsigned int status, foldmark;

		targetEUN = thisEUN;
		for (block = 0; block < nftl->EraseSize / 512; block ++) {
			nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
				      (block * 512), 16 , &retlen,
				      (char *)&oob);
			if (block == 2) {
				foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
				if (foldmark == FOLD_MARK_IN_PROGRESS) {
					DEBUG(MTD_DEBUG_LEVEL1,
					      "Write Inhibited on EUN %d\n", thisEUN);
					inplace = 0;
				} else {
					/* There's no other reason not to do inplace,
					   except ones that come later.
So we don't need to preserve inplace */ inplace = 1; } } status = oob.b.Status | oob.b.Status1; BlockLastState[block] = status; switch(status) { case SECTOR_FREE: BlockFreeFound[block] = 1; break; case SECTOR_USED: if (!BlockFreeFound[block]) BlockMap[block] = thisEUN; else printk(KERN_WARNING "SECTOR_USED found after SECTOR_FREE " "in Virtual Unit Chain %d for block %d\n", thisVUC, block); break; case SECTOR_DELETED: if (!BlockFreeFound[block]) BlockMap[block] = BLOCK_NIL; else printk(KERN_WARNING "SECTOR_DELETED found after SECTOR_FREE " "in Virtual Unit Chain %d for block %d\n", thisVUC, block); break; case SECTOR_IGNORE: break; default: printk("Unknown status for block %d in EUN %d: %x\n", block, thisEUN, status); } } if (!silly--) { printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n", thisVUC); return BLOCK_NIL; } thisEUN = nftl->ReplUnitTable[thisEUN]; } if (inplace) { /* We're being asked to be a fold-in-place. Check that all blocks which actually have data associated with them (i.e. BlockMap[block] != BLOCK_NIL) are either already present or SECTOR_FREE in the target block. If not, we're going to have to fold out-of-place anyway. */ for (block = 0; block < nftl->EraseSize / 512 ; block++) { if (BlockLastState[block] != SECTOR_FREE && BlockMap[block] != BLOCK_NIL && BlockMap[block] != targetEUN) { DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, " "block %d was %x lastEUN, " "and is in EUN %d (%s) %d\n", thisVUC, block, BlockLastState[block], BlockMap[block], BlockMap[block]== targetEUN ? "==" : "!=", targetEUN); inplace = 0; break; } } if (pendingblock >= (thisVUC * (nftl->EraseSize / 512)) && pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) && BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] != SECTOR_FREE) { DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. " "Folding out of place.\n", targetEUN); inplace = 0; } } if (!inplace) { DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. 
" "Trying out-of-place\n", thisVUC); /* We need to find a targetEUN to fold into. */ targetEUN = NFTL_findfreeblock(nftl, 1); if (targetEUN == BLOCK_NIL) { /* Ouch. Now we're screwed. We need to do a fold-in-place of another chain to make room for this one. We need a better way of selecting which chain to fold, because makefreeblock will only ask us to fold the same one again. */ printk(KERN_WARNING "NFTL_findfreeblock(desperate) returns 0xffff.\n"); return BLOCK_NIL; } } else { /* We put a fold mark in the chain we are folding only if we fold in place to help the mount check code. If we do not fold in place, it is possible to find the valid chain by selecting the longer one */ oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS); oob.u.c.unused = 0xffffffff; nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8, 8, &retlen, (char *)&oob.u); } /* OK. We now know the location of every block in the Virtual Unit Chain, and the Erase Unit into which we are supposed to be copying. Go for it. 
*/ DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN); for (block = 0; block < nftl->EraseSize / 512 ; block++) { unsigned char movebuf[512]; int ret; /* If it's in the target EUN already, or if it's pending write, do nothing */ if (BlockMap[block] == targetEUN || (pendingblock == (thisVUC * (nftl->EraseSize / 512) + block))) { continue; } /* copy only in non free block (free blocks can only happen in case of media errors or deleted blocks) */ if (BlockMap[block] == BLOCK_NIL) continue; ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), 512, &retlen, movebuf); if (ret < 0 && ret != -EUCLEAN) { ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), 512, &retlen, movebuf); if (ret != -EIO) printk("Error went away on retry.\n"); } memset(&oob, 0xff, sizeof(struct nftl_oob)); oob.b.Status = oob.b.Status1 = SECTOR_USED; nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + (block * 512), 512, &retlen, movebuf, (char *)&oob); } /* add the header so that it is now a valid chain */ oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff; nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8, 8, &retlen, (char *)&oob.u); /* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */ /* At this point, we have two different chains for this Virtual Unit, and no way to tell them apart. If we crash now, we get confused. However, both contain the same data, so we shouldn't actually lose data in this case. It's just that when we load up on a medium which has duplicate chains, we need to free one of the chains because it's not necessary any more. 
*/ thisEUN = nftl->EUNtable[thisVUC]; DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n"); /* For each block in the old chain (except the targetEUN of course), free it and make it available for future use */ while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) { unsigned int EUNtmp; EUNtmp = nftl->ReplUnitTable[thisEUN]; if (NFTL_formatblock(nftl, thisEUN) < 0) { /* could not erase : mark block as reserved */ nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED; } else { /* correctly erased : mark it as free */ nftl->ReplUnitTable[thisEUN] = BLOCK_FREE; nftl->numfreeEUNs++; } thisEUN = EUNtmp; } /* Make this the new start of chain for thisVUC */ nftl->ReplUnitTable[targetEUN] = BLOCK_NIL; nftl->EUNtable[thisVUC] = targetEUN; return targetEUN; } static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock) { /* This is the part that needs some cleverness applied. For now, I'm doing the minimum applicable to actually get the thing to work. Wear-levelling and other clever stuff needs to be implemented and we also need to do some assessment of the results when the system loses power half-way through the routine. */ u16 LongestChain = 0; u16 ChainLength = 0, thislen; u16 chain, EUN; for (chain = 0; chain < le32_to_cpu(nftl->MediaHdr.FormattedSize) / nftl->EraseSize; chain++) { EUN = nftl->EUNtable[chain]; thislen = 0; while (EUN <= nftl->lastEUN) { thislen++; //printk("VUC %d reaches len %d with EUN %d\n", chain, thislen, EUN); EUN = nftl->ReplUnitTable[EUN] & 0x7fff; if (thislen > 0xff00) { printk("Endless loop in Virtual Chain %d: Unit %x\n", chain, EUN); } if (thislen > 0xff10) { /* Actually, don't return failure. Just ignore this chain and get on with it. */ thislen = 0; break; } } if (thislen > ChainLength) { //printk("New longest chain is %d with length %d\n", chain, thislen); ChainLength = thislen; LongestChain = chain; } } if (ChainLength < 2) { printk(KERN_WARNING "No Virtual Unit Chains available for folding. 
" "Failing request\n"); return 0xffff; } return NFTL_foldchain (nftl, LongestChain, pendingblock); } /* NFTL_findwriteunit: Return the unit number into which we can write for this block. Make it available if it isn't already */ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) { u16 lastEUN; u16 thisVUC = block / (nftl->EraseSize / 512); struct mtd_info *mtd = nftl->mbd.mtd; unsigned int writeEUN; unsigned long blockofs = (block * 512) & (nftl->EraseSize -1); size_t retlen; int silly, silly2 = 3; struct nftl_oob oob; do { /* Scan the media to find a unit in the VUC which has a free space for the block in question. */ /* This condition catches the 0x[7f]fff cases, as well as being a sanity check for past-end-of-media access */ lastEUN = BLOCK_NIL; writeEUN = nftl->EUNtable[thisVUC]; silly = MAX_LOOPS; while (writeEUN <= nftl->lastEUN) { struct nftl_bci bci; size_t retlen; unsigned int status; lastEUN = writeEUN; nftl_read_oob(mtd, (writeEUN * nftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci); DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n", block , writeEUN, le16_to_cpu(bci.Status)); status = bci.Status | bci.Status1; switch(status) { case SECTOR_FREE: return writeEUN; case SECTOR_DELETED: case SECTOR_USED: case SECTOR_IGNORE: break; default: // Invalid block. Don't use it any more. Must implement. break; } if (!silly--) { printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n", thisVUC); return 0xffff; } /* Skip to next block in chain */ writeEUN = nftl->ReplUnitTable[writeEUN]; } /* OK. We didn't find one in the existing chain, or there is no existing chain. */ /* Try to find an already-free block */ writeEUN = NFTL_findfreeblock(nftl, 0); if (writeEUN == BLOCK_NIL) { /* That didn't work - there were no free blocks just waiting to be picked up. We're going to have to fold a chain to make room. 
*/ /* First remember the start of this chain */ //u16 startEUN = nftl->EUNtable[thisVUC]; //printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC); writeEUN = NFTL_makefreeblock(nftl, 0xffff); if (writeEUN == BLOCK_NIL) { /* OK, we accept that the above comment is lying - there may have been free blocks last time we called NFTL_findfreeblock(), but they are reserved for when we're desperate. Well, now we're desperate. */ DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC); writeEUN = NFTL_findfreeblock(nftl, 1); } if (writeEUN == BLOCK_NIL) { /* Ouch. This should never happen - we should always be able to make some room somehow. If we get here, we've allocated more storage space than actual media, or our makefreeblock routine is missing something. */ printk(KERN_WARNING "Cannot make free space.\n"); return BLOCK_NIL; } //printk("Restarting scan\n"); lastEUN = BLOCK_NIL; continue; } /* We've found a free block. Insert it into the chain. */ if (lastEUN != BLOCK_NIL) { thisVUC |= 0x8000; /* It's a replacement block */ } else { /* The first block in a new chain */ nftl->EUNtable[thisVUC] = writeEUN; } /* set up the actual EUN we're writing into */ /* Both in our cache... */ nftl->ReplUnitTable[writeEUN] = BLOCK_NIL; /* ... and on the flash itself */ nftl_read_oob(mtd, writeEUN * nftl->EraseSize + 8, 8, &retlen, (char *)&oob.u); oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); nftl_write_oob(mtd, writeEUN * nftl->EraseSize + 8, 8, &retlen, (char *)&oob.u); /* we link the new block to the chain only after the block is ready. It avoids the case where the chain could point to a free block */ if (lastEUN != BLOCK_NIL) { /* Both in our cache... */ nftl->ReplUnitTable[lastEUN] = writeEUN; /* ... 
and on the flash itself */ nftl_read_oob(mtd, (lastEUN * nftl->EraseSize) + 8, 8, &retlen, (char *)&oob.u); oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = cpu_to_le16(writeEUN); nftl_write_oob(mtd, (lastEUN * nftl->EraseSize) + 8, 8, &retlen, (char *)&oob.u); } return writeEUN; } while (silly2--); printk(KERN_WARNING "Error folding to make room for Virtual Unit Chain 0x%x\n", thisVUC); return 0xffff; } static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, char *buffer) { struct NFTLrecord *nftl = (void *)mbd; u16 writeEUN; unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1); size_t retlen; struct nftl_oob oob; writeEUN = NFTL_findwriteunit(nftl, block); if (writeEUN == BLOCK_NIL) { printk(KERN_WARNING "NFTL_writeblock(): Cannot find block to write to\n"); /* If we _still_ haven't got a block to use, we're screwed */ return 1; } memset(&oob, 0xff, sizeof(struct nftl_oob)); oob.b.Status = oob.b.Status1 = SECTOR_USED; nftl_write(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs, 512, &retlen, (char *)buffer, (char *)&oob); return 0; } #endif /* CONFIG_NFTL_RW */ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block, char *buffer) { struct NFTLrecord *nftl = (void *)mbd; struct mtd_info *mtd = nftl->mbd.mtd; u16 lastgoodEUN; u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)]; unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1); unsigned int status; int silly = MAX_LOOPS; size_t retlen; struct nftl_bci bci; lastgoodEUN = BLOCK_NIL; if (thisEUN != BLOCK_NIL) { while (thisEUN < nftl->nb_blocks) { if (nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) + blockofs, 8, &retlen, (char *)&bci) < 0) status = SECTOR_IGNORE; else status = bci.Status | bci.Status1; switch (status) { case SECTOR_FREE: /* no modification of a sector should follow a free sector */ goto the_end; case SECTOR_DELETED: lastgoodEUN = BLOCK_NIL; break; case SECTOR_USED: lastgoodEUN = thisEUN; break; case SECTOR_IGNORE: break; 
default: printk("Unknown status for block %ld in EUN %d: %x\n", block, thisEUN, status); break; } if (!silly--) { printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%lx\n", block / (nftl->EraseSize / 512)); return 1; } thisEUN = nftl->ReplUnitTable[thisEUN]; } } the_end: if (lastgoodEUN == BLOCK_NIL) { /* the requested block is not on the media, return all 0x00 */ memset(buffer, 0, 512); } else { loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs; size_t retlen; int res = mtd->read(mtd, ptr, 512, &retlen, buffer); if (res < 0 && res != -EUCLEAN) return -EIO; } return 0; } static int nftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) { struct NFTLrecord *nftl = (void *)dev; geo->heads = nftl->heads; geo->sectors = nftl->sectors; geo->cylinders = nftl->cylinders; return 0; } /**************************************************************************** * * Module stuff * ****************************************************************************/ static struct mtd_blktrans_ops nftl_tr = { .name = "nftl", .major = NFTL_MAJOR, .part_bits = NFTL_PARTN_BITS, .blksize = 512, .getgeo = nftl_getgeo, .readsect = nftl_readblock, #ifdef CONFIG_NFTL_RW .writesect = nftl_writeblock, #endif .add_mtd = nftl_add_mtd, .remove_dev = nftl_remove_dev, .owner = THIS_MODULE, }; extern char nftlmountrev[]; static int __init init_nftl(void) { printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.98 $, nftlmount.c %s\n", nftlmountrev); return register_mtd_blktrans(&nftl_tr); } static void __exit cleanup_nftl(void) { deregister_mtd_blktrans(&nftl_tr); } module_init(init_nftl); module_exit(cleanup_nftl); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al."); MODULE_DESCRIPTION("Support code for NAND Flash Translation Layer, used on M-Systems DiskOnChip 2000 and Millennium");
// only needs to support the layouts in test_layouts static void to_wave_format_layout(const struct SoundIoChannelLayout *layout, WAVEFORMATEXTENSIBLE *wave_format) { wave_format->dwChannelMask = 0; wave_format->Format.nChannels = layout->channel_count; for (int i = 0; i < layout->channel_count; i += 1) { enum SoundIoChannelId channel_id = layout->channels[i]; switch (channel_id) { case SoundIoChannelIdFrontLeft: wave_format->dwChannelMask |= SPEAKER_FRONT_LEFT; break; case SoundIoChannelIdFrontRight: wave_format->dwChannelMask |= SPEAKER_FRONT_RIGHT; break; case SoundIoChannelIdFrontCenter: wave_format->dwChannelMask |= SPEAKER_FRONT_CENTER; break; case SoundIoChannelIdLfe: wave_format->dwChannelMask |= SPEAKER_LOW_FREQUENCY; break; case SoundIoChannelIdBackLeft: wave_format->dwChannelMask |= SPEAKER_BACK_LEFT; break; case SoundIoChannelIdBackRight: wave_format->dwChannelMask |= SPEAKER_BACK_RIGHT; break; case SoundIoChannelIdFrontLeftCenter: wave_format->dwChannelMask |= SPEAKER_FRONT_LEFT_OF_CENTER; break; case SoundIoChannelIdFrontRightCenter: wave_format->dwChannelMask |= SPEAKER_FRONT_RIGHT_OF_CENTER; break; case SoundIoChannelIdBackCenter: wave_format->dwChannelMask |= SPEAKER_BACK_CENTER; break; case SoundIoChannelIdSideLeft: wave_format->dwChannelMask |= SPEAKER_SIDE_LEFT; break; case SoundIoChannelIdSideRight: wave_format->dwChannelMask |= SPEAKER_SIDE_RIGHT; break; case SoundIoChannelIdTopCenter: wave_format->dwChannelMask |= SPEAKER_TOP_CENTER; break; case SoundIoChannelIdTopFrontLeft: wave_format->dwChannelMask |= SPEAKER_TOP_FRONT_LEFT; break; case SoundIoChannelIdTopFrontCenter: wave_format->dwChannelMask |= SPEAKER_TOP_FRONT_CENTER; break; case SoundIoChannelIdTopFrontRight: wave_format->dwChannelMask |= SPEAKER_TOP_FRONT_RIGHT; break; case SoundIoChannelIdTopBackLeft: wave_format->dwChannelMask |= SPEAKER_TOP_BACK_LEFT; break; case SoundIoChannelIdTopBackCenter: wave_format->dwChannelMask |= SPEAKER_TOP_BACK_CENTER; break; case 
SoundIoChannelIdTopBackRight: wave_format->dwChannelMask |= SPEAKER_TOP_BACK_RIGHT; break; default: soundio_panic("to_wave_format_layout: unsupported channel id"); } } }
/*
 * Generated by asn1c-0.9.29 (http://lionet.info/asn1c)
 * From ASN.1 module "E2AP-PDU-Contents"
 * found in "/home/rshacham/e2ap-v01.00.00.asn"
 * `asn1c -fcompound-names -fincludes-quoted -fno-include-deps -findirect-choice -gen-PER -no-gen-OER -D .`
 *
 * NOTE(review): auto-generated file; do not hand-edit — regenerate with
 * asn1c from the ASN.1 module instead.
 */
#ifndef _E2setupFailure_H_
#define _E2setupFailure_H_

#include "asn_application.h"

/* Including external dependencies */
#include "ProtocolIE-Container.h"
#include "constr_SEQUENCE.h"

#ifdef __cplusplus
extern "C" {
#endif

/* E2setupFailure: failure response of the E2 setup procedure, carried as a
 * SEQUENCE holding the protocol IE container for this message type. */
typedef struct E2setupFailure {
	/* List of protocol IEs for this message (container type is the
	 * asn1c-assigned instantiation for E2setupFailure). */
	ProtocolIE_Container_1527P13_t protocolIEs;
	/*
	 * This type is extensible,
	 * possible extensions are below.
	 */

	/* Context for parsing across buffer boundaries */
	asn_struct_ctx_t _asn_ctx;
} E2setupFailure_t;

/* Implementation */
extern asn_TYPE_descriptor_t asn_DEF_E2setupFailure;
extern asn_SEQUENCE_specifics_t asn_SPC_E2setupFailure_specs_1;
extern asn_TYPE_member_t asn_MBR_E2setupFailure_1[1];

#ifdef __cplusplus
}
#endif

#endif /* _E2setupFailure_H_ */
/* NOTE(review): trailing include outside the guard is the asn1c-generated
 * pattern (pulls in internal runtime declarations) — intentional. */
#include "asn_internal.h"
/**
 * cifs_backup_query_path_info - SMB1 fallback code to get ino
 *
 * Fallback used to fetch file metadata when we don't have access to
 * full_path (EACCES) and have backup creds: a single FindFirst with the
 * backup-intent bit set stands in for the normal path-info query.
 *
 * @xid: transaction id used to identify original request in logs
 * @tcon: information about the server share we have mounted
 * @sb: the superblock stores info such as disk space available
 * @full_path: name of the file we are getting the metadata for
 * @resp_buf: will be set to cifs resp buf and needs to be freed with
 *	      cifs_buf_release() when done with @data
 * @data: will be set to search info result buffer
 */
static int
cifs_backup_query_path_info(int xid,
			    struct cifs_tcon *tcon,
			    struct super_block *sb,
			    const char *full_path,
			    void **resp_buf,
			    FILE_ALL_INFO **data)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_search_info search = {0};
	u16 search_flags;
	int rc;

	*resp_buf = NULL;
	search.endOfSearch = false;

	/* Pick the richest info level the server/mount combination supports. */
	if (tcon->unix_ext)
		search.info_level = SMB_FIND_FILE_UNIX;
	else if ((tcon->ses->capabilities &
		  tcon->ses->server->vals->cap_nt_find) == 0)
		search.info_level = SMB_FIND_FILE_INFO_STANDARD;
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		search.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
	else
		search.info_level = SMB_FIND_FILE_DIRECTORY_INFO;

	/* One-shot search with backup intent; handle closed on both paths. */
	search_flags = CIFS_SEARCH_CLOSE_ALWAYS |
		       CIFS_SEARCH_CLOSE_AT_END |
		       CIFS_SEARCH_BACKUP_SEARCH;

	rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL,
			   search_flags, &search, false);
	if (rc)
		return rc;

	/* Hand ownership of the network buffer to the caller (see @resp_buf). */
	*resp_buf = (void *)search.ntwrk_buf_start;
	*data = (FILE_ALL_INFO *)search.srch_entries_start;
	return 0;
}
/* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
   other than the exit and latch of the LOOP.  Also resets the
   GIMPLE_DEBUG information.  */

static void
remove_conditions_and_labels (loop_p loop)
{
  unsigned int i;

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gimple_stmt_iterator gsi;

      /* Blocks containing a loop exit edge, and the latch, keep their
	 control statements.  */
      if (bb_with_exit_edge_p (loop, bb) || bb == loop->latch)
	continue;

      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  enum gimple_code code = gimple_code (gsi_stmt (gsi));

	  if (code == GIMPLE_COND || code == GIMPLE_LABEL)
	    /* gsi_remove advances the iterator to the following
	       statement, so no gsi_next here.  */
	    gsi_remove (&gsi, true);
	  else
	    {
	      /* Debug bind values may reference statements that are about
		 to move; drop the value but keep the bind itself.  */
	      if (code == GIMPLE_DEBUG && gimple_debug_bind_p (gsi_stmt (gsi)))
		{
		  gimple_debug_bind_reset_value (gsi_stmt (gsi));
		  update_stmt (gsi_stmt (gsi));
		}
	      gsi_next (&gsi);
	    }
	}
    }
}
/* The following initialization and clean-up is required. */
void start_picture()
{
    /* Reset per-picture bookkeeping before emitting a new picture. */
    fonts_used = 0;
    graphics_used = 0;
    str_f = -1;
    str_v = 0.0;
    str_h2 = 0.0;
    str_size = 0.0;
    /* Open a fresh MetaPost group with an empty picture variable. */
    fputs("begingroup save C,D,p,s,n; picture p; p=nullpicture;\n", mpxf);
}
/* Given src, dst and key, find appropriate for input tunnel. */ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, __be32 remote, __be32 local, __be32 key, __be16 gre_proto) { struct net *net = dev_net(dev); int link = dev->ifindex; unsigned h0 = HASH(remote); unsigned h1 = HASH(key); struct ip_tunnel *t, *cand = NULL; struct ipgre_net *ign = net_generic(net, ipgre_net_id); int dev_type = (gre_proto == htons(ETH_P_TEB)) ? ARPHRD_ETHER : ARPHRD_IPGRE; int score, cand_score = 4; for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) { if (local != t->parms.iph.saddr || remote != t->parms.iph.daddr || key != t->parms.i_key || !(t->dev->flags & IFF_UP)) continue; if (t->dev->type != ARPHRD_IPGRE && t->dev->type != dev_type) continue; score = 0; if (t->parms.link != link) score |= 1; if (t->dev->type != dev_type) score |= 2; if (score == 0) return t; if (score < cand_score) { cand = t; cand_score = score; } } for (t = ign->tunnels_r[h0^h1]; t; t = t->next) { if (remote != t->parms.iph.daddr || key != t->parms.i_key || !(t->dev->flags & IFF_UP)) continue; if (t->dev->type != ARPHRD_IPGRE && t->dev->type != dev_type) continue; score = 0; if (t->parms.link != link) score |= 1; if (t->dev->type != dev_type) score |= 2; if (score == 0) return t; if (score < cand_score) { cand = t; cand_score = score; } } for (t = ign->tunnels_l[h1]; t; t = t->next) { if ((local != t->parms.iph.saddr && (local != t->parms.iph.daddr || !ipv4_is_multicast(local))) || key != t->parms.i_key || !(t->dev->flags & IFF_UP)) continue; if (t->dev->type != ARPHRD_IPGRE && t->dev->type != dev_type) continue; score = 0; if (t->parms.link != link) score |= 1; if (t->dev->type != dev_type) score |= 2; if (score == 0) return t; if (score < cand_score) { cand = t; cand_score = score; } } for (t = ign->tunnels_wc[h1]; t; t = t->next) { if (t->parms.i_key != key || !(t->dev->flags & IFF_UP)) continue; if (t->dev->type != ARPHRD_IPGRE && t->dev->type != dev_type) continue; score = 0; if 
(t->parms.link != link) score |= 1; if (t->dev->type != dev_type) score |= 2; if (score == 0) return t; if (score < cand_score) { cand = t; cand_score = score; } } if (cand != NULL) return cand; if (ign->fb_tunnel_dev->flags & IFF_UP) return netdev_priv(ign->fb_tunnel_dev); return NULL; }
/*
 * OGF/Graphite: Geometry and Graphics Programming Library + Utilities
 * Copyright (C) 2000-2015 INRIA - Project ALICE
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * If you modify this software, you should include a notice giving the
 * name of the person performing the modification, the date of modification,
 * and the reason for such modification.
 *
 * Contact for Graphite: Bruno Levy - Bruno.Levy@inria.fr
 * Contact for this Plugin: Nicolas Ray - nicolas.ray@inria.fr
 *
 * Project ALICE
 * LORIA, INRIA Lorraine,
 * Campus Scientifique, BP 239
 * 54506 VANDOEUVRE LES NANCY CEDEX
 * FRANCE
 *
 * Note that the GNU General Public License does not permit incorporating
 * the Software into proprietary programs.
 *
 * As an exception to the GPL, Graphite can be linked with the following
 * (non-GPL) libraries:
 *     Qt, tetgen, SuperLU, WildMagic and CGAL
 */

#ifndef H_HEXDOM_ALGO_INTERSECT_TOOLS_H
#define H_HEXDOM_ALGO_INTERSECT_TOOLS_H

#include <exploragram/basic/common.h>
#include <exploragram/hexdom/basic.h>
#include <geogram/mesh/mesh.h>

namespace GEO {

    /* Precomputed triangulation index tables (each row is one triangle,
     * given as three vertex indices).  Definitions live in the .cpp. */
    extern const index_t EXPLORAGRAM_API quad_rand_split[2][3];
    extern const index_t EXPLORAGRAM_API quad_split[4][3];
    extern const index_t EXPLORAGRAM_API diamon_split[12][3];
    extern const index_t EXPLORAGRAM_API diamon_quad_split[16][3];

    /* Axis-aligned bounding box.  Default-constructed empty (min > max)
     * so that the first add() initializes it. */
    struct EXPLORAGRAM_API BBox {
        BBox() {
            min = vec3(1e20, 1e20, 1e20);
            max = vec3(-1e20, -1e20, -1e20);
        }
        bool intersect(const BBox& b) const;
        bool contains(const vec3& v) const;
        bool is_null() const;
        /* Grow this box to enclose another box / a point. */
        void add(const BBox& b);
        void add(const vec3& P);
        /* Enlarge the box by eps on every side (symmetric inflation). */
        void dilate(double eps) {
            min -= vec3(eps, eps, eps);
            max += vec3(eps, eps, eps);
        }
        /* Center of the box. */
        vec3 bary() const;
        vec3 min;
        vec3 max;
    };

    /* Static hierarchy of bounding boxes (tree stored flat in 'tree')
     * for accelerated box-vs-primitive intersection queries. */
    struct EXPLORAGRAM_API HBoxes {
        HBoxes() {
        }
        HBoxes(vector<BBox>& inboxes) {
            init(inboxes);
        }
        void init(vector<BBox>& inboxes);
        ~HBoxes() {
        }
        /* Sort helper used while building the hierarchy. */
        void sort(vector<vec3> &G, index_t org, index_t dest);
        /* Collect into 'primitives' the ids of the input boxes whose bbox
         * intersects b, recursing from 'node'. */
        void intersect(BBox& b, vector<index_t>& primitives, index_t node = 0);
        /* Query statistics (profiling counters). */
        int STAT_nb_visits;
        int STAT_nb_leafs;
        int STAT_nb_requests;
        index_t offset;
        /* Mapping from tree position back to original box index. */
        vector<index_t> tree_pos_to_org;
        vector<BBox> tree;
    };

    /* HBoxes wrapper that tolerates per-box updates: boxes changed via
     * update_bbox are tracked separately in 'moved'/'movedbbox' and
     * checked in addition to the static hierarchy. */
    struct EXPLORAGRAM_API DynamicHBoxes {
        void init(vector<BBox>& inboxes);
        void intersect(BBox& b, vector<index_t>& primitives);
        void update_bbox(index_t id, BBox b = BBox());
        HBoxes hbox;
        vector<index_t> moved;
        vector<BBox> movedbbox;
    };

    // double tetra_volume(vec3 A, vec3 B, vec3 C, vec3 D);
    double tetra_volume_sign(vec3 A, vec3 B, vec3 C, vec3 D);
    bool same_sign(double a, double b);
    //bool naive_tri_tri_intersect(vec3 v0, vec3 v1, vec3 v2, vec3 u0, vec3 u1, vec3 u2);

    /* One bounding box per facet of m. */
    vector<BBox> facets_bbox(Mesh* m);

    /* Queries facets of a mesh intersected by a polygon / another facet,
     * backed by a DynamicHBoxes over the mesh's facet boxes. */
    struct FacetIntersect {
        FacetIntersect(Mesh* p_m);
        vector<index_t> get_intersections(vector<vec3>& P);
        vector<index_t> get_intersections(index_t& f);
        vector<BBox> inboxes;
        DynamicHBoxes hb;
        Mesh* m;
    };

    /* Polygon/polygon intersection test on point lists P and Q. */
    bool polyintersect(vector<vec3>& P, vector<vec3>& Q);

    vector<index_t> get_intersecting_faces(Mesh* m);
    void check_no_intersecting_faces(Mesh* m,bool allow_duplicated = false);
}
#endif
/* Resize the table to the minimal size that contains all the elements,
 * but with the invariant of a USED/BUCKETS ratio near to <= 1 */
int dictResize(dict *d)
{
    /* Resizing is refused while disabled globally or mid-rehash. */
    if (!dict_can_resize || dictIsRehashing(d))
        return DICT_ERR;

    /* Target at least the initial size, otherwise exactly the number of
     * used entries. */
    int used = d->ht[0].used;
    return dictExpand(d, (used < DICT_HT_INITIAL_SIZE) ?
                         DICT_HT_INITIAL_SIZE : used);
}
/*
 * Initializes the haptic device for simple rumble playback.
 */
int
SDL_HapticRumbleInit(SDL_Haptic * haptic)
{
    SDL_HapticEffect *effect = &haptic->rumble_effect;

    if (!ValidHaptic(haptic)) {
        return -1;
    }

    /* Already allocated a rumble effect: nothing to do. */
    if (haptic->rumble_id >= 0) {
        return 0;
    }

    /* Build the effect description, preferring a sine periodic effect and
     * falling back to a plain left/right motor effect. */
    SDL_zerop(effect);
    if (haptic->supported & SDL_HAPTIC_SINE) {
        effect->type = SDL_HAPTIC_SINE;
        effect->periodic.direction.type = SDL_HAPTIC_CARTESIAN;
        effect->periodic.period = 1000;
        effect->periodic.magnitude = 0x4000;
        effect->periodic.length = 5000;
        effect->periodic.attack_length = 0;
        effect->periodic.fade_length = 0;
    } else if (haptic->supported & SDL_HAPTIC_LEFTRIGHT) {
        effect->type = SDL_HAPTIC_LEFTRIGHT;
        effect->leftright.length = 5000;
        effect->leftright.large_magnitude = 0x4000;
        effect->leftright.small_magnitude = 0x4000;
    } else {
        return SDL_SetError("Device doesn't support rumble");
    }

    /* Upload the effect; its id doubles as the "initialized" flag. */
    haptic->rumble_id = SDL_HapticNewEffect(haptic, &haptic->rumble_effect);
    return (haptic->rumble_id >= 0) ? 0 : -1;
}
/*
 * Get object comments
 *
 * \dd [foo]
 *
 * Note: This command only lists comments for object types which do not have
 * their comments displayed by their own backslash commands. The following
 * types of objects will be displayed: constraint, operator class,
 * operator family, rule, and trigger.
 *
 * Returns true on success, false if the query could not be executed.
 */
bool
objectDescription(const char *pattern, bool showSystem)
{
    PQExpBufferData buf;
    PGresult   *res;
    printQueryOpt myopt = pset.popt;
    /* Columns: Schema, Name, Object (translatable), Description. */
    static const bool translate_columns[] = {false, false, true, false};

    initPQExpBuffer(&buf);

    /*
     * Build one UNION ALL subquery; each arm yields (oid, tableoid,
     * nspname, name, object) rows for one object type.  The outer
     * SELECT joins the arms against pg_description for the comments.
     */
    appendPQExpBuffer(&buf,
                      "SELECT DISTINCT tt.nspname AS \"%s\", tt.name AS \"%s\", tt.object AS \"%s\", d.description AS \"%s\"\n"
                      "FROM (\n",
                      gettext_noop("Schema"),
                      gettext_noop("Name"),
                      gettext_noop("Object"),
                      gettext_noop("Description"));

    /* Table constraint descriptions */
    appendPQExpBuffer(&buf,
                      " SELECT pgc.oid as oid, pgc.tableoid AS tableoid,\n"
                      " n.nspname as nspname,\n"
                      " CAST(pgc.conname AS pg_catalog.text) as name,"
                      " CAST('%s' AS pg_catalog.text) as object\n"
                      " FROM pg_catalog.pg_constraint pgc\n"
                      " JOIN pg_catalog.pg_class c "
                      "ON c.oid = pgc.conrelid\n"
                      " LEFT JOIN pg_catalog.pg_namespace n "
                      " ON n.oid = c.relnamespace\n",
                      gettext_noop("table constraint"));

    if (!showSystem && !pattern)
        appendPQExpBufferStr(&buf, "WHERE n.nspname <> 'pg_catalog'\n"
                             " AND n.nspname <> 'information_schema'\n");

    processSQLNamePattern(pset.db, &buf, pattern,
                          !showSystem && !pattern, false,
                          "n.nspname", "pgc.conname", NULL,
                          "pg_catalog.pg_table_is_visible(c.oid)");

    /* Domain constraint descriptions */
    appendPQExpBuffer(&buf,
                      "UNION ALL\n"
                      " SELECT pgc.oid as oid, pgc.tableoid AS tableoid,\n"
                      " n.nspname as nspname,\n"
                      " CAST(pgc.conname AS pg_catalog.text) as name,"
                      " CAST('%s' AS pg_catalog.text) as object\n"
                      " FROM pg_catalog.pg_constraint pgc\n"
                      " JOIN pg_catalog.pg_type t "
                      "ON t.oid = pgc.contypid\n"
                      " LEFT JOIN pg_catalog.pg_namespace n "
                      " ON n.oid = t.typnamespace\n",
                      gettext_noop("domain constraint"));

    if (!showSystem && !pattern)
        appendPQExpBufferStr(&buf, "WHERE n.nspname <> 'pg_catalog'\n"
                             " AND n.nspname <> 'information_schema'\n");

    processSQLNamePattern(pset.db, &buf, pattern,
                          !showSystem && !pattern, false,
                          "n.nspname", "pgc.conname", NULL,
                          "pg_catalog.pg_type_is_visible(t.oid)");

    /* Operator class descriptions (pg_opclass layout requires >= 8.3) */
    if (pset.sversion >= 80300)
    {
        appendPQExpBuffer(&buf,
                          "UNION ALL\n"
                          " SELECT o.oid as oid, o.tableoid as tableoid,\n"
                          " n.nspname as nspname,\n"
                          " CAST(o.opcname AS pg_catalog.text) as name,\n"
                          " CAST('%s' AS pg_catalog.text) as object\n"
                          " FROM pg_catalog.pg_opclass o\n"
                          " JOIN pg_catalog.pg_am am ON "
                          "o.opcmethod = am.oid\n"
                          " JOIN pg_catalog.pg_namespace n ON "
                          "n.oid = o.opcnamespace\n",
                          gettext_noop("operator class"));

        if (!showSystem && !pattern)
            appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
                                 " AND n.nspname <> 'information_schema'\n");

        processSQLNamePattern(pset.db, &buf, pattern, true, false,
                              "n.nspname", "o.opcname", NULL,
                              "pg_catalog.pg_opclass_is_visible(o.oid)");
    }

    /* Operator family descriptions (pg_opfamily queried only on >= 9.2) */
    if (pset.sversion >= 90200)
    {
        appendPQExpBuffer(&buf,
                          "UNION ALL\n"
                          " SELECT opf.oid as oid, opf.tableoid as tableoid,\n"
                          " n.nspname as nspname,\n"
                          " CAST(opf.opfname AS pg_catalog.text) AS name,\n"
                          " CAST('%s' AS pg_catalog.text) as object\n"
                          " FROM pg_catalog.pg_opfamily opf\n"
                          " JOIN pg_catalog.pg_am am "
                          "ON opf.opfmethod = am.oid\n"
                          " JOIN pg_catalog.pg_namespace n "
                          "ON opf.opfnamespace = n.oid\n",
                          gettext_noop("operator family"));

        if (!showSystem && !pattern)
            appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
                                 " AND n.nspname <> 'information_schema'\n");

        processSQLNamePattern(pset.db, &buf, pattern, true, false,
                              "n.nspname", "opf.opfname", NULL,
                              "pg_catalog.pg_opfamily_is_visible(opf.oid)");
    }

    /* Rule descriptions (ignore the '_RETURN' rules of views) */
    appendPQExpBuffer(&buf,
                      "UNION ALL\n"
                      " SELECT r.oid as oid, r.tableoid as tableoid,\n"
                      " n.nspname as nspname,\n"
                      " CAST(r.rulename AS pg_catalog.text) as name,"
                      " CAST('%s' AS pg_catalog.text) as object\n"
                      " FROM pg_catalog.pg_rewrite r\n"
                      " JOIN pg_catalog.pg_class c ON c.oid = r.ev_class\n"
                      " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n"
                      " WHERE r.rulename != '_RETURN'\n",
                      gettext_noop("rule"));

    if (!showSystem && !pattern)
        appendPQExpBufferStr(&buf, " AND n.nspname <> 'pg_catalog'\n"
                             " AND n.nspname <> 'information_schema'\n");

    processSQLNamePattern(pset.db, &buf, pattern, true, false,
                          "n.nspname", "r.rulename", NULL,
                          "pg_catalog.pg_table_is_visible(c.oid)");

    /* Trigger descriptions */
    appendPQExpBuffer(&buf,
                      "UNION ALL\n"
                      " SELECT t.oid as oid, t.tableoid as tableoid,\n"
                      " n.nspname as nspname,\n"
                      " CAST(t.tgname AS pg_catalog.text) as name,"
                      " CAST('%s' AS pg_catalog.text) as object\n"
                      " FROM pg_catalog.pg_trigger t\n"
                      " JOIN pg_catalog.pg_class c ON c.oid = t.tgrelid\n"
                      " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n",
                      gettext_noop("trigger"));

    if (!showSystem && !pattern)
        appendPQExpBufferStr(&buf, "WHERE n.nspname <> 'pg_catalog'\n"
                             " AND n.nspname <> 'information_schema'\n");

    processSQLNamePattern(pset.db, &buf, pattern,
                          !showSystem && !pattern, false,
                          "n.nspname", "t.tgname", NULL,
                          "pg_catalog.pg_table_is_visible(c.oid)");

    /* Close the subquery and join each arm against its comment, if any. */
    appendPQExpBufferStr(&buf,
                         ") AS tt\n"
                         " JOIN pg_catalog.pg_description d ON (tt.oid = d.objoid AND tt.tableoid = d.classoid AND d.objsubid = 0)\n");

    appendPQExpBufferStr(&buf, "ORDER BY 1, 2, 3;");

    res = PSQLexec(buf.data);
    termPQExpBuffer(&buf);
    if (!res)
        return false;

    myopt.nullPrint = NULL;
    myopt.title = _("Object descriptions");
    myopt.translate_header = true;
    myopt.translate_columns = translate_columns;
    myopt.n_translate_columns = lengthof(translate_columns);

    printQuery(res, &myopt, pset.queryFout, pset.logfile);

    PQclear(res);
    return true;
}
/*
 * Disable (mask) the specified IRQ line on the 8259 PIC pair.
 *
 * irq_num: IRQ line 0-15.  IRQs 0-7 live on the master PIC,
 *          IRQs 8-15 on the slave; out-of-range values are ignored.
 *
 * Fixes vs. the previous version:
 *  - no longer masks the cascade line (IRQ2) when disabling a slave
 *    IRQ, which used to block ALL slave interrupts, not just one;
 *  - no longer calls sti() after restore_flags(), which forced
 *    interrupts back on even when the caller had them disabled.
 */
void disable_irq(uint32_t irq_num)
{
    int flags;

    /* Reject out-of-range IRQ numbers instead of shifting by >= width. */
    if (irq_num >= 16)
        return;

    /* Update the cached mask and the PIC data port atomically w.r.t. IRQs. */
    cli_and_save(flags);
    if (irq_num < 8) {
        /* Master PIC handles IRQs 0-7: set the mask bit in OCW1. */
        cached_21 |= 1 << irq_num;
        outb(cached_21, MASTER_8259_PORT + 1);
    } else {
        /* Slave PIC handles IRQs 8-15; leave the cascade line alone. */
        cached_A1 |= 1 << (irq_num - 8);
        outb(cached_A1, SLAVE_8259_PORT + 1);
    }
    restore_flags(flags);
}
/* * Tdecode does the grung work to decode the * string capability escapes. */ static char * tdecode(char *str, char **area) { char *cp; int c; char *dp; int i; cp = *area; while ((c = *str++) != 0 && c != ':') { switch (c) { case '^': c = *str++ & 037; break; case '\\': dp = "E\033^^\\\\::n\nr\rt\tb\bf\f"; c = *str++; nextc: if (*dp++ == c) { c = *dp++; break; } dp++; if (*dp) goto nextc; if (isdigit(c)) { c -= '0', i = 2; do c <<= 3, c |= *str++ - '0'; while (--i && isdigit(*str)) ; } break; } *cp++ = c; } *cp++ = 0; str = *area; *area = cp; return (str); }
/* Metric set RasterizerAndPixelBackend :: HS Threads Dispatched */
static uint64_t
sklgt3__rasterizer_and_pixel_backend__hs_threads__read(struct brw_context *brw,
                                                       const struct brw_perf_query_info *query,
                                                       uint64_t *accumulator)
{
    /* Raw counter value at accumulator slot a_offset + 2; no scaling. */
    return accumulator[query->a_offset + 2];
}
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#ifndef RIPPLE_PEERFINDER_HANDOUTS_H_INCLUDED
#define RIPPLE_PEERFINDER_HANDOUTS_H_INCLUDED

#include <ripple/beast/container/aged_set.h>
#include <ripple/peerfinder/impl/SlotImp.h>
#include <ripple/peerfinder/impl/Tuning.h>
#include <cassert>
#include <iterator>
#include <type_traits>

namespace ripple {
namespace PeerFinder {

namespace detail {

/** Try to insert one object in the target.

    When an item is handed out it is moved to the end of the container.

    @return The number of objects inserted (0 or 1)
*/
// VFALCO TODO specialization that handles std::list for SequenceContainer
//             using splice for optimization over erase/push_back
//
template <class Target, class HopContainer>
std::size_t
handout_one(Target& t, HopContainer& h)
{
    assert(!t.full());
    for (auto it = h.begin(); it != h.end(); ++it)
    {
        auto const& e = *it;
        if (t.try_insert(e))
        {
            // Handed out: move it to the back of the container.
            h.move_back(it);
            return 1;
        }
    }
    return 0;
}

}  // namespace detail

/** Distributes objects to targets according to business rules.

    A best effort is made to evenly distribute items in the sequence
    container list into the target sequence list.
*/
template <class TargetFwdIter, class SeqFwdIter>
void
handout(
    TargetFwdIter first,
    TargetFwdIter last,
    SeqFwdIter seq_first,
    SeqFwdIter seq_last)
{
    // Keep making passes over the sources until either every target is
    // full or a complete pass hands out nothing.
    for (;;)
    {
        std::size_t n(0);
        for (auto si = seq_first; si != seq_last; ++si)
        {
            auto c = *si;
            bool all_full(true);
            for (auto ti = first; ti != last; ++ti)
            {
                auto& t = *ti;
                if (!t.full())
                {
                    n += detail::handout_one(t, c);
                    all_full = false;
                }
            }
            if (all_full)
                return;
        }
        if (!n)
            break;
    }
}

//------------------------------------------------------------------------------

/** Receives handouts for redirecting a connection.

    An incoming connection request is redirected when we are full
    on slots.
*/
class RedirectHandouts
{
public:
    template <class = void>
    explicit RedirectHandouts(SlotImp::ptr const& slot);

    template <class = void>
    bool
    try_insert(Endpoint const& ep);

    bool
    full() const
    {
        return list_.size() >= Tuning::redirectEndpointCount;
    }

    SlotImp::ptr const&
    slot() const
    {
        return slot_;
    }

    std::vector<Endpoint>&
    list()
    {
        return list_;
    }

    std::vector<Endpoint> const&
    list() const
    {
        return list_;
    }

private:
    SlotImp::ptr slot_;
    std::vector<Endpoint> list_;
};

template <class>
RedirectHandouts::RedirectHandouts(SlotImp::ptr const& slot) : slot_(slot)
{
    list_.reserve(Tuning::redirectEndpointCount);
}

template <class>
bool
RedirectHandouts::try_insert(Endpoint const& ep)
{
    if (full())
        return false;

    // VFALCO NOTE This check can be removed when we provide the
    //             addresses in a peer HTTP handshake instead of
    //             the tmENDPOINTS message.
    //
    if (ep.hops > Tuning::maxHops)
        return false;

    // Don't send them our address
    if (ep.hops == 0)
        return false;

    // Don't send them their own address
    if (slot_->remote_endpoint().address() == ep.address.address())
        return false;

    // Make sure the address isn't already in our list
    if (std::any_of(list_.begin(), list_.end(), [&ep](Endpoint const& other) {
            // Ignore port for security reasons
            return other.address.address() == ep.address.address();
        }))
    {
        return false;
    }

    list_.emplace_back(ep.address, ep.hops);

    return true;
}

//------------------------------------------------------------------------------

/** Receives endpoints for a slot during periodic handouts. */
class SlotHandouts
{
public:
    template <class = void>
    explicit SlotHandouts(SlotImp::ptr const& slot);

    template <class = void>
    bool
    try_insert(Endpoint const& ep);

    bool
    full() const
    {
        return list_.size() >= Tuning::numberOfEndpoints;
    }

    void
    insert(Endpoint const& ep)
    {
        list_.push_back(ep);
    }

    SlotImp::ptr const&
    slot() const
    {
        return slot_;
    }

    std::vector<Endpoint> const&
    list() const
    {
        return list_;
    }

private:
    SlotImp::ptr slot_;
    std::vector<Endpoint> list_;
};

template <class>
SlotHandouts::SlotHandouts(SlotImp::ptr const& slot) : slot_(slot)
{
    list_.reserve(Tuning::numberOfEndpoints);
}

template <class>
bool
SlotHandouts::try_insert(Endpoint const& ep)
{
    if (full())
        return false;

    if (ep.hops > Tuning::maxHops)
        return false;

    if (slot_->recent.filter(ep.address, ep.hops))
        return false;

    // Don't send them their own address
    if (slot_->remote_endpoint().address() == ep.address.address())
        return false;

    // Make sure the address isn't already in our list
    if (std::any_of(list_.begin(), list_.end(), [&ep](Endpoint const& other) {
            // Ignore port for security reasons
            return other.address.address() == ep.address.address();
        }))
        return false;

    list_.emplace_back(ep.address, ep.hops);

    // Insert into this slot's recent table. Although the endpoint
    // didn't come from the slot, adding it to the slot's table
    // prevents us from sending it again until it has expired from
    // the other end's cache.
    //
    slot_->recent.insert(ep.address, ep.hops);

    return true;
}

//------------------------------------------------------------------------------

/** Receives handouts for making automatic connections. */
class ConnectHandouts
{
public:
    // Keeps track of addresses we have made outgoing connections
    // to, for the purposes of not connecting to them too frequently.
    using Squelches = beast::aged_set<beast::IP::Address>;

    using list_type = std::vector<beast::IP::Endpoint>;

private:
    std::size_t m_needed;
    Squelches& m_squelches;
    list_type m_list;

public:
    template <class = void>
    ConnectHandouts(std::size_t needed, Squelches& squelches);

    template <class = void>
    bool
    try_insert(beast::IP::Endpoint const& endpoint);

    bool
    empty() const
    {
        return m_list.empty();
    }

    bool
    full() const
    {
        return m_list.size() >= m_needed;
    }

    bool
    try_insert(Endpoint const& endpoint)
    {
        return try_insert(endpoint.address);
    }

    list_type&
    list()
    {
        return m_list;
    }

    list_type const&
    list() const
    {
        return m_list;
    }
};

template <class>
ConnectHandouts::ConnectHandouts(std::size_t needed, Squelches& squelches)
    : m_needed(needed), m_squelches(squelches)
{
    m_list.reserve(needed);
}

template <class>
bool
ConnectHandouts::try_insert(beast::IP::Endpoint const& endpoint)
{
    if (full())
        return false;

    // Make sure the address isn't already in our list
    if (std::any_of(
            m_list.begin(),
            m_list.end(),
            [&endpoint](beast::IP::Endpoint const& other) {
                // Ignore port for security reasons
                return other.address() == endpoint.address();
            }))
    {
        return false;
    }

    // Add to squelch list so we don't try it too often.
    // If its already there, then make try_insert fail.
    auto const result(m_squelches.insert(endpoint.address()));
    if (!result.second)
        return false;

    m_list.push_back(endpoint);

    return true;
}

}  // namespace PeerFinder
}  // namespace ripple

#endif
#include <stdio.h>
#include <stdlib.h>

#define size 100005

/*
 * Reads n integers and prints the length of the longest contiguous
 * segment in which every element (from the third of the segment on)
 * equals the sum of the previous two.  Any two adjacent elements form
 * a trivial valid segment, so for n >= 2 the answer is at least 2;
 * for n == 1 the answer is 1.
 *
 * Fixes vs. the previous version: proper `int main(void)` (implicit
 * int was removed from C99), the non-portable "%I64d" format replaced
 * by the standard "%lld" for long long, checked scanf results, and an
 * explicit return value.
 */
int
main(void)
{
    int n, i;
    long int a[size];
    long long int count = 0, max = -90;

    if (scanf("%d", &n) != 1)
        return 1;
    for (i = 0; i < n; ++i)
    {
        if (scanf("%ld", &a[i]) != 1)
            return 1;
    }

    if (n == 1)
    {
        printf("1");
        return 0;
    }

    for (i = 2; i < n; ++i)
    {
        if (a[i] == (a[i - 1] + a[i - 2]))
        {
            /* Segment continues: one more valid "Fibonacci" position. */
            count++;
        }
        else
        {
            /* Segment broken: its length is the run plus the seed pair. */
            count += 2;
            if (count > max)
                max = count;
            count = 0;
        }
    }
    /* Account for the final (possibly unbroken) segment. */
    count += 2;
    if (count > max)
        max = count;

    printf("%lld", max);
    return 0;
}
/*
 * Resize the source texture to the DNN network input size and convert
 * the pixels to fp32 in the network's input buffer.
 *
 * srctex:       source texture to feed to the palm-detection network.
 * win_w, win_h: window dimensions; win_h positions the scratch render
 *               at the top of the window (win_w is currently unused).
 *
 * Fixes vs. the previous version: the static readback buffer was
 * allocated only once with the first call's (w, h) — stale and
 * potentially undersized if the network input size ever changed —
 * and the malloc result was never checked.
 */
void
feed_palm_detection_image(texture_2d_t *srctex, int win_w, int win_h)
{
    int x, y, w, h;
    float *buf_fp32 = (float *)get_palm_detection_input_buf (&w, &h);
    unsigned char *buf_ui8 = NULL;
    static unsigned char *pui8 = NULL;
    static int pui8_w = 0, pui8_h = 0;

    /* Lazily (re)allocate the RGBA8 readback buffer to match (w, h). */
    if (pui8 == NULL || pui8_w != w || pui8_h != h)
    {
        free (pui8);
        pui8 = (unsigned char *)malloc (w * h * 4);
        pui8_w = w;
        pui8_h = h;
    }
    if (pui8 == NULL)
        return;     /* allocation failed; skip this frame */

    buf_ui8 = pui8;

    /* Render the texture scaled to (w, h) and read the pixels back. */
    draw_2d_texture_ex (srctex, 0, win_h - h, w, h, 1);

    glPixelStorei (GL_PACK_ALIGNMENT, 4);
    glReadPixels (0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, buf_ui8);

    /* Convert RGBA8 -> RGB fp32, normalized as (v - 128) / 128 -> [-1, 1). */
    float mean = 128.0f;
    float std  = 128.0f;
    for (y = 0; y < h; y ++)
    {
        for (x = 0; x < w; x ++)
        {
            int r = *buf_ui8 ++;
            int g = *buf_ui8 ++;
            int b = *buf_ui8 ++;
            buf_ui8 ++;          /* skip alpha */

            *buf_fp32 ++ = (float)(r - mean) / std;
            *buf_fp32 ++ = (float)(g - mean) / std;
            *buf_fp32 ++ = (float)(b - mean) / std;
        }
    }

    return;
}